text
stringlengths
64
2.99M
[Figure 7 — node-link diagram whose labels were garbled by PDF extraction into font glyph codes, with multi-column rows interleaved. Decoded from the glyph map (some representation-to-task pairings could not be reliably recovered and are omitted):

Representation nodes decoded from the figure: Tokenizer, Abstract Syntax Tree (AST), Code metrics, codeBERT, iSeVC, sSyVC, Regular Expression, Image, Application Information, API Calls, BPE Subword Tokenization, Semantic Graph, Program Graph, Call graph, Control Flow Graph (CFG), Interprocedural Control Flow Graph (ICFG), System Dependence Graph (SDG), Program Dependence Graph (PDG), Contract Graph, Simplified CPG (SCPG), Contextual API Dependency Graph (CADG), Contextual Permission Dependency Graph (CPDG), Crucial Data Flow Graph (CDFG), Code Property Graph (CPG), Code Aggregate Graph (CAG), Slice Property Graph (SPG), Value Flow Graph (VFG), Token Graph, Contract Snippet, Propagation Chain.

Task nodes decoded from the figure: Vulnerability Detection, Vulnerability Prediction, Vulnerability Localization, Vulnerability Analysis, Vulnerability Extrapolation, Vulnerability Discovery, Vulnerability Repair, Malware Detection, Malware Prediction, Malware Classification, Malicious Code Detection/Localization, Malicious Package Detection, Buffer Overrun Prediction, Reentrancy Detection, Cryptography Misuse, Clone Detection, Deobfuscation, Injection Attack Detection, Classifying Android Sources and Links, Security Analysis, Unprotected API Vulnerability Discovery.

Among the edge counts that survived extraction: Tokenizer → Vulnerability Detection (7); AST → Vulnerability Detection (2); most remaining edges carry counts of 1–3. The full adjacency could not be reconstructed from the scrambled text.] Fig. 7.
Relationship between representations and tasks

Figure 7 depicts the relationships between representations and cybersecurity tasks. ASTs and tokenizers were the two representations most commonly used for vulnerability detection. Although AST and tokenizers were the most used representations, we also found that vulnerability detection techniques mostly used graph-based representations. Graph-based representations were also popular for other cybersecurity tasks, namely malicious code localization, malware detection, vulnerability localization, vulnerability analysis, [running header: ACM Comput. Surv., Vol. 37, No. 4, Article 111. Publication date: March 2024. 111:20 • Beatrice Casey, Joanna C. S. Santos, and George Perry] malware classification, and vulnerability extrapolation. Among the graph-based representations, CFG is the most popular one. This could be because it details the program's execution flow, which can be useful in finding whether a vulnerability would occur due to the structure of the program. DFG is another popular representation, particularly for vulnerability detection. Similarly to CFGs, DFGs detail the flow of the program, although they detail the data flow. This can also be useful in vulnerability detection because if a harmful data input reaches a security sensitive program point, then a vulnerability will likely occur.

Code gadgets, iSeVC, and sSyVC were representations that were specifically made for vulnerability detection techniques. ICFGs, call graphs, and CADGs are the preferred representation for malware detection. ICFGs describe the complete control flow across a program, and CADGs are derived from a version of ICFGs that include context. Both of these representations provide insights to potential security related invocations, which could allow malware to affect the system.

RQ2 Findings:
• Since vulnerability detection is the most popular task, it is the task that most representations are used for.
• Certain representations (i.e., iSeVC, sSyVC, and code gadgets) were created for the specific task of vulnerability detection and are therefore only used for that task.
• Call graphs, ICFGs and CADGs are the preferred representation for malware detection.
7 RQ3: WHAT CYBERSECURITY TASKS ARE COVERED BY THE ML-BASED TECHNIQUES?
To better understand the types of tasks that are covered by the techniques discussed in this paper, we sorted the different tasks to fit into the 9 disciplines of the Rational Unified Process (RUP) cycle [198]. The RUP cycle is a software development process framework that allows software developers to better organize and plan the development process. In this question, we sorted the unique cybersecurity tasks found from our search into the nine main workflows of the RUP cycle: business modeling, requirements, analysis and design, implementation, test, deployment, configuration and change management, project management, and environment. We observed that the cybersecurity tasks only fit into 5 out of the 9 categories. Those categories being: analysis and design, configuration and change management, environment, implementation, and test. Figure 8 depicts how the different cybersecurity tasks fit into these 5 disciplines from the RUP cycle.

[Figure 8 — labels and paper counts decoded from garbled extraction; grouping into disciplines inferred from Sections 7.1–7.5, as the figure layout could not be fully recovered:
Analysis and Design — Malware Detection (10), Malware Prediction (1), Malware Classification (2), Security Analysis (1).
Implementation — Vulnerability Repair (4), Vulnerability Prediction (14), Buffer Overrun Prediction (1).
Testing — Vulnerability Detection (75), Vulnerability Localization (1), Vulnerability Analysis (3), Vulnerability Classification (2), Vulnerability Extrapolation (1), Vulnerability Testing (4), Malicious Behaviour Detection (1), Malicious Code Detection (1), Malicious Code Filtering (1), Malicious Code Classification (1), Malicious Code Deobfuscation (1), Malicious Code Localization (1), Cryptography Misuse (1), Password Leaks (1), Injection Attack Detection (3), Reentrancy Detection (2), Vulnerable Code Clone Detection (1), Unprotected API Vulnerability Discovery (1), Classifying Android Sources and Links (1).
Configuration and Change Management — Security Patch Detection/Identification (4), Vulnerability Fixing/Inducing Commits Detection (3).
Environment — Malicious Package Detection (1).]

Fig. 8. Cybersecurity Tasks in the RUP cycle

7.1 Analysis and design

Analysis and Design involves translating requirements into a formal model of the software. It results in a description of the system that will be implemented and are the documents software engineers use as a guide while they implement the system. Malware detection, prediction, and classification are important tasks, particularly for Android applications [17–26, 34, 150, 179], but also for any system that could be injected with malware. Malware allows attackers to take advantage of security flaws in systems. Understanding types of [running header: ACM Comput. Surv., Vol. 37, No. 4, Article 111. Publication date: March 2024. A Survey of Source Code Representations for Machine Learning-Based Cybersecurity Tasks • 111:21]
malware and ransomware, as well as where they occur allows one to take steps to avoid these issues before implementing a system. Security analysis is a task that was used particularly for smart contracts [173], but it can be generalized to any system that has the potential to be compromised.

7.2 Implementation

Implementation is the actual coding of the software model. This is when the design from the previous phase is brought to life. Vulnerability repair is a task that fixes vulnerabilities in code. This fits into implementation because it is actually implementing the fix to the code while the developer is working [29, 43, 115]. Moreover, there are a few tasks involved with inferring or predicting elements of code, such as buffer overrun prediction and vulnerability prediction. Buffer overrun is a type of vulnerability and Choi et al. [127] focuses their prediction approach on this one type of vulnerability, rather than many others. These prediction techniques allow developers to know where these issues may occur in the code while they are implementing it.

7.3 Testing

Testing involves exercising the software for any flaws or inconsistencies with the design. Many detection techniques fall into this category, such as cryptography misuse, malicious code detection, filtering, classification, deobfuscation, and localization, malicious behaviour detection, password leaks, injection attack detection, reentrancy detection, vulnerability analysis, detection, classification, prediction, localization, extrapolation, and testing, vulnerable code clone detection and unprotected API vulnerability discovery. These efforts in the testing phase can protect systems against potential security issues that could be very costly if found when the system is in production.

7.4 Configuration and Change Management
Configuration and change management tracks and maintains a project as it is evolving through time. It ensures that the code created during implementation is still useable and can be reused throughout other portions of the project if needed [198]. Modern software development workflows often use remote repositories to track the code that has been created and its changes. Thus, commits are usually the source of a vulnerability or issue, and the fix to a vulnerability or issue. Zhou et al. [130] and Nguyen-Truong et al. [129] created techniques to assist in the classification of security commits, as well as identifying vulnerability inducing, or vulnerability fixing commits, including security patch detection.

7.5 Environment

Environment focuses on the software development environment required for the engineers to develop the system. This includes techniques and processes required by developers [198]. Packages are an essential part of the development process. They provide useful techniques that can greatly simplify the implementation of a system. However, some packages can contain some type of vulnerability or malware that can jeopardize the integrity of the system. Therefore, malicious package detection is an important task that can protect systems from such malicious software.

As cryptocurrency becomes a more popular and prevalent topic, research is also starting to focus on creating ML-based techniques to assist in issues relating to cryptocurrencies and smart contracts [46, 47, 80, 102, 105, 108, 165, 173, 174, 177]. All of these papers are interested in vulnerability detection or testing, and security analysis of smart contracts. Two papers [47, 104] focus on a particular vulnerability called reentrancy attacks, which is a vulnerability specific to smart contracts. This vulnerability results in an attacker being able to withdraw funds from a smart contract repeatedly and transfer them [47].

[running header: ACM Comput. Surv., Vol. 37, No. 4, Article 111. Publication date: March 2024. 111:22 • Beatrice Casey, Joanna C. S. Santos, and George Perry]

RQ3 Findings:
• Vulnerability detection by far is the most popular task, with 75 papers focusing on this task.
• Certain papers focus solely on detecting one type of vulnerability, such as buffer overrun prediction, reentrancy detection, and injection attack detection.
• A majority of these tasks fit under the testing category of the RUP cycle, meaning that these techniques are aimed for evaluating the security of already written code before it is deployed.

8 RQ4: WHAT PROGRAMMING LANGUAGES ARE COVERED BY EXISTING TECHNIQUES?

Table 3 shows the programming languages that are covered by existing techniques. We observed that C is the most popular language; 80 (57.1%) papers developed techniques for C programs. C++ is the second most common language that is covered by 50 papers (35.7%). All of these 50 papers also supported C. The popularity of the C and C++ languages could be attributed to two main factors. First, this could be due to the availability of C datasets for these different cybersecurity tasks [199–202]. Second, these languages are at a higher risk of memory-related vulnerabilities, which are often severe and pervasive in many software systems [203].

We also observed a number of techniques are geared towards security tasks for Android applications [12, 12, 17–27, 34, 97, 150, 179]. Although Android applications can be written using Kotlin and Java, the papers studied in this survey only focused on apps written in Java. Additionally, with the increasing popularity of smart contracts, a number of papers developed techniques for Solidity, the language that smart contracts are written in.

Table 3. Languages supported by existing techniques. JS = JavaScript; TS = TypeScript; SpiderMonkey = SM
Lang. #Papers | Lang. #Papers | Lang. #Papers | Lang. #Papers | Lang. #Papers | Lang. #Papers | Lang. #Papers
C 80 (57.1%) | JS 12 (8.6%) | Python 6 (4.3%) | C# 2 (1.4%) | XML/XUL 2 (1.4%) | Groovy 1 (0.7%) | Ruby 1 (0.7%)
C++ 50 (35.7%) | Solidity 12 (8.6%) | CSS 3 (2.1%) | SQL 2 (1.4%) | Gecko 1 (0.7%) | HTML 1 (0.7%) | Smali 1 (0.7%)
Java 34 (24.2%) | PHP 8 (5.7%) | Rust 3 (2.1%) | TS 2 (1.4%) | Go 1 (0.7%) | Powershell 1 (0.7%) | SM 1 (0.7%)
Two works [34, 124] did not specify the language(s) used. If a paper did not specify the language or dataset used, we labeled it as "not specified." No techniques were language-agnostic, meaning that all the systems created were made only for one or potentially a few languages. We also found that there are only 6 techniques for Python and 12 for JavaScript. This is surprising because a recent survey shows that these languages are becoming more and more popular with developers in practice⁵.

RQ4 Findings:
• C is the most common language that is supported by cybersecurity techniques.
• Despite their popularity, there are not many techniques for Python and Javascript.
• A large portion of papers are aimed at solving security issues in Android applications. Thus, Java is also a popular language, with 34 techniques geared towards solving these issues in Java.
• Given the increasing popularity of smart contracts, there are a number of techniques (12) that were created for Solidity — the language used to create smart contracts.

9 RQ5: WHAT MODELS ARE COMMONLY USED WITH DIFFERENT REPRESENTATIONS?

To sort the different types of models used throughout the papers, we took inspiration from Siow et al. [204] and classified them into five categories: sequence-based models, feature-based models, tree-based models, graph-based

⁵https://spectrum.ieee.org/the-top-programming-languages-2023

[running header: ACM Comput. Surv., Vol. 37, No. 4, Article 111. Publication date: March 2024. A Survey of Source Code Representations for Machine Learning-Based Cybersecurity Tasks • 111:23]

Table 4. Different models used by the surveyed papers.
ModelType Models SequenceBased Transformer[43,44,54,91,100,104,115,118,121,142],DiscreteFourierTransform[51],ConvolutionalNeural Network[11,21,35,37,88,93,125,131,138,151,159,178],TextCNN[38],TemporalCNN[14],RecurrentNeural Network[37,39,91,136,144],BidirectionalRNN[126,171],Autoencoders[29],seq2seq[85,142],BERT[134,160], codeBERT[128,137],JavaBERT[137],HierarchicalAttentionNetwork[90,96,170],doc2vec[139],word2vec[119, 139],Selfattentionnetworks[96],GatedRecurrentUnit[172],BidirectionalGatedRecurrentUnit[87,158,169,178], OnlineLearning[19],LongShortTermMemory[11,117,122,133,134,145,152,161,164],BidirectionalLSTM [13,16,41,47,96,107,134,158,167,178],ParagraphVectorDistributedMemory[165],DeepLearningAttention- basedConvolutionalGatedRecurrentUnit[123],BidirectionalRNNforVulnerabilityDetectionandLocating[48], PassiveAggressiveClassi￿er[18],ExtremeLearningMachine[150],Encoder-Decoder[140],GPT[141] GraphBased GraphNeuralNetwork[12,45,46,79,81,94,99,103,108,110,112–114,143,162],DeepBeliefNetwork[24], GraphConvolutionalNetwork[34,82,87,96,114,175],GraphAttentionNetwork[98],FeatureAttention-Graph ConvolutionalNetwork[89],RecurrentGraphConvolutionalNetwork[111],GatedGraphNeuralNetwork[86,100], BidirectionalGraphNeuralNetwork[36],GraphCodeBERT[102] TreeBased Boosting[155,156],ExtremeGradientBoosting[124],LightGradientBoostingMachine[124],adaboost[92], GradientBoostingDecisionTree[92],Bagging[156],RandomForest[20,22,23,25,27,53,83,84,92,106,124,146, 148,149,155,161,173,177],coForest[106],DecisionTree[23,27,84,92,146,148,153,155,161,173,177],code2vec [176],NaiveBayes[17,23,27,53,84,124,148,153,161],TreeaugmentedNaiveBayes[84],AbstractSyntaxTree NeuralNetwork[33],Extra-treesClassi￿er[35] NeuralNetworks Multi-LayerPerceptron[22,80,83,92,95,110,135,143,152,168],NeuralNetwork[105,161,173,179],DeepNeural Network[11,109,146,157,166],ComplexDeepNeuralNetwork[146],AbstractSyntaxTreeNeuralNetwork[33], AttentionNeuralNetwork[101],NeuralMemoryNetwork[127],RandomNeuralNetwork[131] 
Feature-Based Clustering[92],K-meansclustering[139],K-medianclustering[180],NaiveBayes[17,23,27,53,84,124,148,153, 161],GaussianNaiveBayes,TreeaugmentedNaiveBayes[84],logisticregression[17,83,84,92,95,106,124,146, 148,153,154,177],ExtremeMachineLearning[150],NearestNeighbor[20],K-NearestNeighbor[22,92,146,153, 161,177],linearregression[146],SupportVectorMachine[20,23,26,27,32,92,97,116,124,132,139,146,147,154– 156,161,163,173,177],C-SupportVectorClassi￿cationVariantofSupportVectorMachine[146],LinearDiscriminant Analysis[92,156,161],Density-BasedSpatialClusteringofApplicationswithNoise[139] models,andneuralnetworks.Table4demonstratesthemodelsusedbythesurveyedpapersandhowtheyfall intoeachcategory.Byfar,amajorityofthesemodelsaresequence-based,particularlyCNNs,Transformers,and LSTMs.Thisismostlikelyduetotheirgeneralpopularity,butalsobecausethesemodelsareverypowerful,are abletoovercomethevanishinggradientproblem,andareabletohandlelong-termdependencies[205]. However,SVMsoverallwerethemostpopularmodel.Thismightbebecausewhendeterminingifcodehassome sortofcybersecurityissue,suchasavulnerability,themostusefulthingtolearnorfocusonarethefeatures ofthecode.Forexample,ifamodelcanlearnwhatfeaturesmakeapieceofcodevulnerableornotvulnerable, thenthemodelwillbeabletosuccessfullyperformvulnerabilitydetection.SVMsarepowerfulinlearningthe
features that differentiate classes, or in this case, code. Therefore, SVMs are useful for learning the features of code that would make it vulnerable or malicious versus benign.
Figure 9 shows the types of models that were used for each type of representation. Additionally, the red boxes indicate model types that could potentially be used by these representations, although we did not observe it explicitly in our data. Many papers also used multiple different models to compare the performance of their method. Naturally, there is a constraint on the type of representation and the type of model used. For example, only graph representations can be used with a graph-based model.
ACM Comput. Surv., Vol. 37, No. 4, Article 111. Publication date: March 2024.
111:24 • Beatrice Casey, Joanna C. S. Santos, and George Perry
[Figure 9: a diagram mapping source code representations (e.g., CFG, DFG, AST, parse tree, token graph, opcode sequences, code gadgets, simplified CPG, contract graph, slice property graph) to the model categories used with them (sequence-based, graph-based, tree-based, feature-based, and neural networks). The figure's embedded text could not be recovered from the PDF extraction.]
Fig. 9. Relationships between models and representations
RQ5 Findings:
• Sequence-based is the most common and popular category of models.
• SVMs overall are the most popular model used for different tasks due to their ability to discriminate features that would group code into one group or another.
• The studied papers have a heavier focus on adjusting the models rather than investigating the representation of source code on the whole.
10 THREATS TO VALIDITY
In this section, we will discuss the threats to validity of this survey around construct, internal, and external validity threats as outlined by Runeson and Höst [206].
Construct validity refers to how well the operational measures that are used represent the research questions that are being studied. In our study, these measures mainly involve counts of tasks and representations, as well as the relationship between the two, and the relationships between representations and models. Thus, our analysis relies on the accuracy of the reviewers while we were categorizing each of the papers. Additionally, when searching for the papers, we relied on the ability of the search engines we used to return to us all the papers that were related to our search query. To mitigate these issues, we had the reviewers separately analyze each paper, and later discuss and resolve any discrepancies in the analyses. We also created a thorough and broad enough query to ensure the results we received from our search encompassed all the papers related to this study.
Internal validity describes how well a study mitigates bias and systematic error such that a causal conclusion can be drawn. Due to the potential threat of incorrectly categorizing a paper, we had two of the authors individually review the papers, and then meet to discuss any discrepancies between the categorizations. Disagreements were resolved through discussion. To assess the reliability of our evaluation, we used Cohen's Kappa. Our calculated score is 0.97, meaning that we had a near-perfect agreement in our analysis [207].
ACM Comput. Surv., Vol. 37, No. 4, Article 111. Publication date: March 2024.
A Survey of Source Code Representations for Machine Learning-Based Cybersecurity Tasks • 111:25
Externalvalidityassessesthegeneralizabilityofthestudy.Themainthreattothisworkisthatweonlyfocus onthepasttenandahalfyears(2012-May2023),sowemayhavemissedpapersoutsideofthisrange.Wealso didnotincludepreprintsandthusmighthavemissednewerpapers.Additionally,ourkeywordsrelyonmachine learning,andanypapersthatdonotincludethesekeywordsmayhavebeenmissed.Finally,weonlylookedat threespeci￿csources:ACM,IEEE,andSpringerLink.Whiletheremayhavebeenpapersoutsideofthesethree sourcesrelevanttoourwork,theywereoutsideofthescopeofsearchforthispaper. 11 CONCLUSIONANDDISCUSSION Inthissection,wediscussour￿ndings,sharerecommendationsforfutureworks,andconcludethework. 11.1 Discussion 11.1.1 Representations. Weobserved42di￿erentrepresentationsonthesurveyedpapers.Therewereanabun- danceofgraph-basedrepresentations,eachhavingtheirownadvantagesanddisadvantages.Forexample,a control￿owgraphallowsamodeltounderstandhowafunctionwillactuallybehaveduringexecution.Thus,this representationcanassistmodelsinlearningprogramexecutionpatternsthatcouldleadtosecurityrisksincode [35]which,inturn,makesitusefulinvulnerabilitydetectionandmalwareclassi￿cation.However,giventhat procedurescouldhavehundredsofinstructions,withhundreds(ormore)ofedgesconnectingtherelationships betweenthebasicblocks,buildingthesegraphscanbequitecomplexandtime-consuming[208]. 
Program slices can also be useful for detecting vulnerabilities because of their ability to pinpoint where in a program values change, but given the complexity of creating the graph (which includes creating a call graph, PDG, and SDG), it is understandable that not many techniques used this as a representation. The same applies for CPGs and any other robust representation in general: there is a trade-off between the time it takes to build these representations and the performance of the representation in the model. Images are a simple way to create a representation of source code. While images are limited to tasks such as detection and prediction, it is a good way to make use of technology that already exists. Similarly, code metrics are a simple representation to compute, and do not require extensive engineering efforts. This representation, however, lacks the ability to capture the program's behavior. In other words, the data it uses is more like metadata about the code. This means that, for example for vulnerability detection, there is no way to identify where exactly a vulnerability is located in a file; the granularity of this method is not very fine. There are, however, weaknesses and limitations with robust representations as well, such as the inability for CPGs to detect issues in runtime behavior.
There are different benefits or reasoning for choosing certain representations. For example, tokenizing, specifically using NLP techniques like word2vec [57], allows for the model to understand the semantic meaning of the code.
Thiswouldallowthemodeltobetterseeifaparticularlineofcodewouldcauseavulnerabilitybecausetypically avulnerabilitymaynotdependonthestructureofthecode.Instead,whatthecodeisactuallydoingwilldescribe thevulnerability.However,somevulnerabilitiescanbearesultofthestructureofcode.Thewayaprogram￿ows orisorganizedcouldallowanattackertotakeadvantageofasystem.Thus,itmakessensethatanAST,orsome variationofit,wouldbeapopularande￿ectivemethodforvulnerabilitydetection.AlthoughASTsaresimpleto make,theylackgranularity[63]duetothecomplexnatureofsourcecode,whichcouldbeo￿eredbyothermore complexgraphrepresentations,e.g.,control￿owgraphs.Certainrepresentationshavetoolsthatmakeiteasyto createthem.Forexample,callgraphscanbegeneratedinmanywayssuchasbyusingatoollikeSoot[97]or CScout[209]. Table5demonstratesthedi￿erentabilitiesoftheserepresentationsbasedonsomeofthekeyfeaturesthatthese di￿erentrepresentationshave.‘Lightweight’referstohowcomplextherepresentationistocreate.Forexample,as discussedabove,ASTsaresimpletogenerate.Becausetheircomputationalcomplexityisrelativelylow,theycan ACMComput.Surv.,Vol.37,No.4,Article111.Publicationdate:March2024.111:26 • BeatriceCasey,JoannaC.S.Santos,andGeorgePerry Table5. Abilitiesthedi￿erentrepresentationsinthisstudyhave.Representationsthathavethesameabilitiesaregrouped together.BPEisBPESubwordTokenization,Op.Seq.isOpcodeSequences,andApp.InformationisApplicationInformation Structure Semantics DataFlow StatementFlow Lightweight Interprocedural CPG,PDG,ProgramSlices,SDG,SPG,CAG 3 3 3 3 7 3 VFG,Contract/SemanticGraph 3 3 3 3 7 7 ProgramGraph,PropagationChain CADG,PropertyGraph,CPDG 3 7 3 3 7 3 CICFG,CallGraph 3 7 7 3 7 3 SCPG,CSSDG 3 7 3 3 7 7 AST,ParseTree,Image 3 7 7 7 3 7 Tokenizer,doc2vec,codeBERT,BPE,Op.Seq. 
7 3 7 7 3 7 iSeVC,CodeGadgets 3 3 3 3 3 7 ContractSnippet 7 3 7 3 3 7 App.Information,APICalls,CodeMetrics 7 7 7 7 3 7 CFG 3 7 7 3 7 7 DFG 3 3 3 7 7 7 ICFG 3 7 7 3 7 3 CDG 3 3 7 7 7 7 CBG 3 3 7 3 7 7 CDFG 3 7 3 7 7 7 TokenGraph 7 3 7 7 7 7 sSyVC 3 3 7 7 3 7 RegularExpression 3 7 7 3 3 7 beconsideredlightweight.Programslices,ontheotherhand,havealotofoverheadandthusarenotconsidered lightweight. 11.1.2 Tasks. Most of these cybersecurity tasks fall under the testing category, as developers want to test the security oftheir code before deploying it. However, thisbreakdown oftasks into the RUP cycles allows researcherstoseeareasofopportunitiesforfutureresearch,suchastoolswhichfocusonthesoftwaredevelopment environment,implementation,andcon￿gurationandchangemanagement. 11.1.3 ProgrammingLanguages. DespitethepopularityofPythonandJavaScript,therewerenotalotoftools fortheselanguages.Iftheyareusedsomuchinpractice,wecouldexpecttheretobeanaccordingnumberof techniquesthataimtocoverthem,particularlyforsecurity-relatedassistance.Itisalsopossiblethatthereare notenoughdatasetsfortheselanguagesandbecauseofthis,therearenotmanytechniquesforthembecause researchersareunabletohavethenecessarydatatotrainandtestthemodels.Nonetheless,thisisagapthat shouldbeaddressedbyresearchers.
Unsurprisingly, there were no language-agnostic techniques. Making a tool language-agnostic is difficult due to the variety of programming paradigms that exist, and the fact that not all languages follow one particular paradigm. This does mean, however, that researchers should be diligent in developing techniques that support popular and commonly used languages. From the 140 papers we have studied, we noticed that rather than attempting to find a new or more comprehensive way to represent source code, prior works are focused on trying different models or new models with more advanced architectures so that the model can learn more from the representation, without changing it. Given the improvements in power and capability of machine learning models in recent years, it is understandable why researchers would take this approach. However, it is important to remember that the way a model learns is ACM Comput. Surv., Vol. 37, No. 4, Article 111. Publication date: March 2024. A Survey of Source Code Representations for Machine Learning-Based Cybersecurity Tasks • 111:27 impacted greatly by the feature representations, as it can allow the model to learn and isolate critical information that is important for it to successfully complete its task. 11.2 Recommendations Based on the results, we recommend future works to explore different representations for a particular task rather than just fine-tuning the model. Since the model learns features of the source code from the representation and embeddings created from the representation, there would be a greater improvement in the performance of these models if more attention is paid to the representations. It is also important for researchers to pay attention to the popularity of languages, as there should be an effort to create techniques and tools that address popular and more frequently used languages to ensure that we can avoid as many security risks as possible. 11.3 Conclusions In this paper, we summarize the state of the field of source code representations in machine learning models for cybersecurity related software engineering tasks. We studied 140 papers out of an initial 64,803 and found the most common tasks, representations used, languages covered, and the relationships between tasks and representations. 
WefoundthatanASTrepresentationandatokenizedrepresentationarethemostcommon,andvulnerability detection,malwaredetectionandvulnerabilitypredictionarethemostcoveredtasksbyexistingtechniques. Additionally,wefoundthatthelanguagecoveredbythemosttechniquesisC,followedbyC++. REFERENCES [1] JoannaCSSantos,KatyTarrit,andMehdiMirakhorli. Acatalogofsecurityarchitectureweaknesses. In2017IEEEInternational ConferenceonSoftwareArchitectureWorkshops(ICSAW),pages220–223.IEEE,2017. [2] I.Alexander.Misusecases:usecaseswithhostileintent.IEEESoftware,20(1):58–66,2003. [3] J.McDermottandC.Fox.Usingabusecasemodelsforsecurityrequirementsanalysis.InProceedings15thAnnualComputerSecurity ApplicationsConference(ACSAC’99),pages55–64,1999. [4] A.Shostack.Threatmodelingdesigningforsecurity.Wiley,2014. [5] S.T.Halkidis,N.Tsantalis,A.Chatzigeorgiou,andG.Stephanides.Architecturalriskanalysisofsoftwaresystemsbasedonsecurity patterns.IEEETransactionsonDependableandSecureComputing,5(3):129–142,2008. [6] J.Ryoo,P.Laplante,andR.Kazman.Amethodologyforminingsecuritytacticsfromsecuritypatterns.In201043rdHawaiiInternational ConferenceonSystemSciences,pages1–5,2010. [7] LarissaBraz,EnricoFregnan,GülÇalikli,andAlbertoBacchelli.Whydon’tdevelopersdetectimproperinputvalidation?’;droptable papers;–.In2021IEEE/ACM43rdInternationalConferenceonSoftwareEngineering(ICSE),pages499–511.IEEE,2021. [8] B.Arkin,S.Stender,andG.McGraw.Softwarepenetrationtesting.IEEESecurity&Privacy,3(1):84–87,2005. [9] B.ChessandG.McGraw.Staticanalysisforsecurity.IEEESecurity&Privacy,2(6):76–79,2004. [10] A.RussoandA.Sabelfeld. Dynamicvs.static￿ow-sensitivesecurityanalysis. In201023rdIEEEComputerSecurityFoundations Symposium,pages186–199,2010. [11] G.Lin,W.Xiao,J.Zhang,andY.Xiang.Deeplearning-basedvulnerablefunctiondetection:Abenchmark.InJianyingZhou,Xiapu Luo,QingniShen,andZhenXu,editors,InformationandCommunicationsSecurity,pages219–232,Cham,2020.SpringerInternational Publishing. 
[12] G.RenjithandS.Aji.Vulnerabilityanalysisanddetectionusinggraphneuralnetworksforandroidoperatingsystem.InSomanath Tripathy,RudrapatnaK.Shyamasundar,andRajivRanjan,editors,InformationSystemsSecurity,pages57–72,Cham,2021.Springer InternationalPublishing. [13] N.Guo,X.Li,H.Yin,andY.Gao.Vulhunter:Anautomatedvulnerabilitydetectionsystembasedondeeplearningandbytecode.In JianyingZhou,XiapuLuo,QingniShen,andZhenXu,editors,InformationandCommunicationsSecurity,pages199–218,Cham,2020. SpringerInternationalPublishing. [14] J.Chen,B.Liu,S.Cai,W.Wang,andS.Wang.Aidetectorx:Avulnerabilitydetectorbasedontcnandself-attentionmechanism.In ShengchaoQin,JimWoodcock,andWenhuiZhang,editors,DependableSoftwareEngineering.Theories,Tools,andApplications,pages 161–177,Cham,2021.SpringerInternationalPublishing. [15] B.Mosolygó,N.Vándor,P.Hegedűs,andR.Ferenc.Aline-levelexplainablevulnerabilitydetectionapproachforjava.InOsvaldoGervasi, BeniaminoMurgante,SanjayMisra,AnaMariaA.C.Rocha,andChiaraGarau,editors,ComputationalScienceandItsApplications– ACMComput.Surv.,Vol.37,No.4,Article111.Publicationdate:March2024.111:28 • BeatriceCasey,JoannaC.S.Santos,andGeorgePerry ICCSA2022Workshops,pages106–122,Cham,2022.SpringerInternationalPublishing.
[16] G.Lin,J.Zhang,W.Luo,L.Pan,Y.Xiang,O.DeVel,andP.Montague.Cross-projecttransferrepresentationlearningforvulnerable functiondiscovery.IEEETransactionsonIndustrialInformatics,14(7):3289–3297,2018. [17] L.Cen,C.S.Gates,L.Si,andN.Li.Aprobabilisticdiscriminativemodelforandroidmalwaredetectionwithdecompiledsourcecode. IEEETransactionsonDependableandSecureComputing,12(4):400–412,2015. [18] A.Narayanan,L.Yang,L.Chen,andL.Jinliang.Adaptiveandscalableandroidmalwaredetectionthroughonlinelearning.In2016 InternationalJointConferenceonNeuralNetworks(IJCNN),pages2484–2491,2016. [19] A.Narayanan,M.Chandramohan,L.Chen,andY.Liu. Context-aware,adaptiveandscalableandroidmalwaredetectionthrough onlinelearning(extendedversion),2017. [20] E.Mariconti,L.Onwuzurike,P.Andriotis,E.DeCristofaro,G.Ross,andG.Stringhini.Mamadroid:Detectingandroidmalwareby buildingmarkovchainsofbehavioralmodels,2017. [21] P.Zegzhda,D.Zegzhda,E.Pavlenko,andG.Ignatev.Applyingdeeplearningtechniquesforandroidmalwaredetection.InProceedingsof the11thInternationalConferenceonSecurityofInformationandNetworks,SIN’18,NewYork,NY,USA,2018.AssociationforComputing Machinery. [22] J.Allen,M.Landen,S.Chaba,Y.Ji,S.P.H.Chung,andW.Lee.Improvingaccuracyofandroidmalwaredetectionwithlightweight contextualawareness.InProceedingsofthe34thAnnualComputerSecurityApplicationsConference,ACSAC’18,page210–221,New York,NY,USA,2018.AssociationforComputingMachinery. [23] J.D.Koli.Randroid:Androidmalwaredetectionusingrandommachinelearningclassi￿ers.In2018TechnologiesforSmart-CityEnergy SecurityandPower(ICSESP),pages1–6,2018. [24] Z.Wang,J.Cai,S.Cheng,andW.Li.Droiddeeplearner:Identifyingandroidmalwareusingdeeplearning.In2016IEEE37thSarno￿ Symposium,pages160–165,2016. [25] N.Xie,F.Zeng,X.Qin,Y.Zhang,M.Zhou,andC.Lv. Repassdroid:Automaticdetectionofandroidmalwarebasedonessential permissionsandsemanticfeaturesofsensitiveapis.In2018InternationalSymposiumonTheoreticalAspectsofSoftwareEngineering (TASE),pages52–59,2018. 
[26] A.Narayanan,M.Chandramohan,L.Chen,andY.Liu. Amulti-viewcontext-awareapproachtoandroidmalwaredetectionand maliciouscodelocalization.EmpiricalSoftwareEngineering,23(3):1222–1274,2017. [27] C.Yang,Z.Xu,G.Gu,V.Yegneswaran,andP.Porras.Droidminer:Automatedminingandcharacterizationof￿ne-grainedmalicious behaviorsinandroidapplications. InMirosławKutyłowskiandJaideepVaidya,editors,ComputerSecurity-ESORICS2014,pages 163–182,Cham,2014.SpringerInternationalPublishing. [28] L.Braz,C.Aeberhard,G.Çalikli,andA.Bacchelli.Lessismore:Supportingdevelopersinvulnerabilitydetectionduringcodereview. InProceedingsofthe44thInternationalConferenceonSoftwareEngineering,ICSE’22,page1317–1329,NewYork,NY,USA,2022. AssociationforComputingMachinery. [29] J.Chi,Y.Qu,T.Liu,Q.Zheng,andH.Yin.Seqtrans:Automaticvulnerability￿xviasequencetosequencelearning.IEEETransactions onSoftwareEngineering,49(2):564–585,2023. [30] F.AlDebeyan,T.Hall,andD.Bowes.Improvingtheperformanceofcodevulnerabilitypredictionusingabstractsyntaxtreeinformation. InProceedingsofthe18thInternationalConferenceonPredictiveModelsandDataAnalyticsinSoftwareEngineering,PROMISE2022,page 2–11,NewYork,NY,USA,2022.AssociationforComputingMachinery. [31] A.Sej￿aandM.Schäfer.Practicalautomateddetectionofmaliciousnpmpackages.InProceedingsofthe44thInternationalConference onSoftwareEngineering,pages1681–1692,PittsburghPennsylvania,May2022.ACM. [32] G.E.deP.Rodrigues,A.M.Braga,andR.Dahab.Usinggraphembeddingsandmachinelearningtodetectcryptographymisusein sourcecode.In202019thIEEEInternationalConferenceonMachineLearningandApplications(ICMLA),pages1059–1066,2020. [33] G.Partenza,T.Amburgey,L.Deng,J.Dehlinger,andS.Chakraborty.Automaticidenti￿cationofvulnerablecode:Investigationswith anast-basedneuralnetwork.In2021IEEE45thAnnualComputers,Software,andApplicationsConference(COMPSAC),pages1475–1482, 2021. 
[34] A.MesterandZ.Bodó.Malwareclassi￿cationbasedongraphconvolutionalneuralnetworksandstaticcallgraphfeatures.InHamido Fujita,PhilippeFournier-Viger,MoonisAli,andYinglinWang,editors,AdvancesandTrendsinArti￿cialIntelligence.Theoryand PracticesinArti￿cialIntelligence,pages528–539,Cham,2022.SpringerInternationalPublishing. [35] J.A.Hareretal.Automatedsoftwarevulnerabilitydetectionwithmachinelearning,2018. [36] S.Cao,X.Sun,L.Bo,Y.Wei,andB.Li. Bgnn4vd:Constructingbidirectionalgraphneural-networkforvulnerabilitydetection. InformationandSoftwareTechnology,136:106576,2021. [37] I.Kalouptsoglou,M.Siavvas,D.Kehagias,A.Chatzigeorgiou,andA.Ampatzoglou.Anempiricalevaluationoftheusefulnessofword embeddingtechniquesindeeplearning-basedvulnerabilityprediction.InErolGelenbe,MarijaJankovic,DionysiosKehagias,Anna Marton,andAndrasVilmos,editors,SecurityinComputerandInformationSciences,pages23–37,Cham,2022.SpringerInternational Publishing. ACMComput.Surv.,Vol.37,No.4,Article111.Publicationdate:March2024.ASurveyofSourceCodeRepresentationsforMachineLearning-BasedCybersecurityTasks • 111:29
[38] R.Feng,Z.Yan,S.Peng,andY.Zhang.Automateddetectionofpasswordleakagefrompublicgithubrepositories.In2022IEEE/ACM 44thInternationalConferenceonSoftwareEngineering(ICSE),pages175–186,2022. [39] N.Saccente,J.Dehlinger,L.Deng,S.Chakraborty,andY.Xiong.Projectachilles:Aprototypetoolforstaticmethod-levelvulnerability detectionofjavasourcecodeusingarecurrentneuralnetwork.In201934thIEEE/ACMInternationalConferenceonAutomatedSoftware EngineeringWorkshop(ASEW),pages114–121,2019. [40] D.Vagavolu,K.C.Swarna,andS.Chimalakonda.AMocktailofSourceCodeRepresentations.In202136thIEEE/ACMInternational ConferenceonAutomatedSoftwareEngineering(ASE),pages1296–1300,Melbourne,Australia,November2021.IEEE. [41] Z.Li,D.Zou,S.Xu,X.Ou,H.Jin,S.Wang,Z.Deng,andY.Zhong. VulDeePecker:Adeeplearning-basedsystemforvulnerability detection.InProceedings2018NetworkandDistributedSystemSecuritySymposium.InternetSociety,2018. [42] V.-A.Nguyen,D.Q.Nguyen,V.Nguyen,T.Le,Q.H.Tran,andD.Phung.Regvd:Revisitinggraphneuralnetworksforvulnerability detection.InProceedingsoftheACM/IEEE44thInternationalConferenceonSoftwareEngineering:CompanionProceedings,ICSE’22,page 178–182,NewYork,NY,USA,2022.AssociationforComputingMachinery. [43] M.Fu,C.Tantithamthavorn,T.Le,V.Nguyen,andD.Phung. VulRepair:aT5-basedautomatedsoftwarevulnerabilityrepair. In Proceedingsofthe30thACMJointEuropeanSoftwareEngineeringConferenceandSymposiumontheFoundationsofSoftwareEngineering, pages935–947,SingaporeSingapore,November2022.ACM. [44] X.Cheng,G.Zhang,H.Wang,andY.Sui.Path-sensitivecodeembeddingviacontrastivelearningforsoftwarevulnerabilitydetection. InProceedingsofthe31stACMSIGSOFTInternationalSymposiumonSoftwareTestingandAnalysis,ISSTA2022,page519–531,New York,NY,USA,2022.AssociationforComputingMachinery. [45] S.M.Gha￿arianandH.R.Shahriari.Neuralsoftwarevulnerabilityanalysisusingrichintermediategraphrepresentationsofprograms. InformationSciences,553:189–207,2021. 
[46] Y.Zhuang,Z.Liu,P.Qian,Q.Liu,X.Wang,andQ.He.Smartcontractvulnerabilitydetectionusinggraphneuralnetwork.InChristian Bessiere,editor,ProceedingsoftheTwenty-NinthInternationalJointConferenceonArti￿cialIntelligence,IJCAI-20,pages3283–3290. InternationalJointConferencesonArti￿cialIntelligenceOrganization,72020.Maintrack. [47] P.Qian,Z.Liu,Q.He,R.Zimmermann,andX.Wang.Towardsautomatedreentrancydetectionforsmartcontractsbasedonsequential models.IEEEAccess,8:19685–19695,2020. [48] Z.Li,D.Zou,S.Xu,Z.Chen,Y.Zhu,andH.Jin. Vuldeelocator:Adeeplearning-based￿ne-grainedvulnerabilitydetector. IEEE TransactionsonDependableandSecureComputing,19(4):2821–2837,2022. [49] B.KitchenhamandS.Charters.Guidelinesforperformingsystematicliteraturereviewsinsoftwareengineering.2,012007. [50] R.A.Kemmerer.Cybersecurity.In25thInternationalConferenceonSoftwareEngineering,2003.Proceedings.,pages705–715,2003. [51] X.Xia,Y.Wang,andY.Yang.Sourcecodevulnerabilitydetectionbasedonsar-gin.In20212ndInternationalConferenceonElectronics, CommunicationsandInformationTechnology(CECIT),pages1144–1149,2021. [52] F.Yamaguchi,N.Golde,D.Arp,andK.Rieck. Modelinganddiscoveringvulnerabilitieswithcodepropertygraphs. In2014IEEE SymposiumonSecurityandPrivacy,pages590–604,2014. [53] R.Scandariato,J.Walden,A.Hovsepyan,andW.Joosen.Predictingvulnerablesoftwarecomponentsviatextmining.IEEETransactions onSoftwareEngineering,40(10):993–1006,2014. [54] L.Burattietal.Exploringsoftwarenaturalnessthroughneurallanguagemodels,2020. [55] A.BernsteinandA.Kuleshov.Low-dimensionaldatarepresentationindataanalysis.InNeamatElGayar,FriedhelmSchwenker,and ChengSuen,editors,Arti￿cialNeuralNetworksinPatternRecognition,pages47–58,Cham,2014.SpringerInternationalPublishing. [56] M.Grohe.Word2vec,node2vec,graph2vec,x2vec:Towardsatheoryofvectorembeddingsofstructureddata.InProceedingsofthe39th ACMSIGMOD-SIGACT-SIGAISymposiumonPrinciplesofDatabaseSystems,PODS’20,page1–16,NewYork,NY,USA,2020.Association forComputingMachinery. 
[57] T.Mikolov,K.Chen,G.Corrado,andJ.Dean.E￿cientestimationofwordrepresentationsinvectorspace,2013. [58] A.Narayanan,M.Chandramohan,R.Venkatesan,L.Chen,Y.Liu,andS.Jaiswal.graph2vec:Learningdistributedrepresentationsof graphs,2017. [59] U.Alon,M.Zilberstein,O.Levy,andE.Yahav.Code2vec:Learningdistributedrepresentationsofcode.Proc.ACMProgram.Lang., 3(POPL),jan2019. [60] W.Ma,M.Zhao,E.Soremekun,Q.Hu,J.M.Zhang,M.Papadakis,M.Cordy,X.Xie,andY.L.Traon.GraphCode2Vec:genericcode embeddingvialexicalandprogramdependenceanalyses. InProceedingsofthe19thInternationalConferenceonMiningSoftware Repositories,pages524–536,PittsburghPennsylvania,May2022.ACM. [61] S.KhanandS.Parkinson. ReviewintoStateoftheArtofVulnerabilityAssessmentusingArti￿cialIntelligence,pages3–32. Springer InternationalPublishing,Cham,2018.
[62] V.H.S.Durelli,R.S.Durelli,S.S.Borges,A.T.Endo,M.M.Eler,D.R.C.Dias,andM.P.Guimarães. Machinelearningappliedto softwaretesting:Asystematicmappingstudy.IEEETransactionsonReliability,68(3):1189–1212,2019. [63] M.T.BinNazim,M.J.H.Faruk,H.Shahriar,M.A.Khan,M.Masum,N.Sakib,andF.Wu.Systematicanalysisofdeeplearningmodel forvulnerablecodedetection.In2022IEEE46thAnnualComputers,Software,andApplicationsConference(COMPSAC),pages1768–1773, ACMComput.Surv.,Vol.37,No.4,Article111.Publicationdate:March2024.111:30 • BeatriceCasey,JoannaC.S.Santos,andGeorgePerry 2022. [64] H.P.Samoaa,F.Bayram,P.Salza,andP.Leitner. Asystematicmappingstudyofsourcecoderepresentationfordeeplearningin softwareengineering.IETSoftware,16(4):351–385,2022. [65] Y.Yang,X.Xia,D.Lo,andJ.Grundy.Asurveyondeeplearningforsoftwareengineering,2020. [66] F.Ferreira,L.L.Silva,andM.T.Valente.Softwareengineeringmeetsdeeplearning:Amappingstudy,2020. [67] G.Lin,S.Wen,Q.-L.Han,J.Zhang,andY.Xiang.Softwarevulnerabilitydetectionusingdeepneuralnetworks:Asurvey.Proceedings oftheIEEE,108(10):1825–1848,2020. [68] T.Sonnekalb,T.SHeinze,andP.Mäder. [69] A.O.A.Semasaba,W.Zheng,X.Wu,andS.A.Agyemang.Literaturesurveyofdeeplearning-basedvulnerabilityanalysisonsource code.IETSoftware,14(6):654–664,2020. [70] S.M.Gha￿arianandH.R.Shahriari. SoftwareVulnerabilityAnalysisandDiscoveryUsingMachine-LearningandData-Mining Techniques:ASurvey.ACMComputingSurveys,50(4):1–36,jul2018. [71] J.Wu.Literaturereviewonvulnerabilitydetectionusingnlptechnology,2021. [72] Z.ChenandM.Monperrus.Aliteraturestudyofembeddingsonsourcecode,2019. [73] Z.Kotti,R.Galanopoulou,andD.Spinellis.Machinelearningforsoftwareengineering:Atertiarystudy.ACMComput.Surv.,55(12), mar2023. 
[74] L.E.Lwakatare,A.Raj,J.Bosch,H.H.Olsson,andI.Crnkovic.Ataxonomyofsoftwareengineeringchallengesformachinelearning systems:Anempiricalinvestigation.InPhilippeKruchten,StevenFraser,andFrançoisCoallier,editors,AgileProcessesinSoftware EngineeringandExtremeProgramming,pages227–243,Cham,2019.SpringerInternationalPublishing. [75] H.Hanif,M.H.NizamMdNasir,M.FaizalAbRazak,A.Firdaus,andN.B.Anuar.Theriseofsoftwarevulnerability:Taxonomyof softwarevulnerabilitiesdetectionandmachinelearningapproaches.JournalofNetworkandComputerApplications,179:103009,2021. [76] M.Usman,M.A.Jan,X.He,andJ.Chen.Asurveyonrepresentationlearninge￿ortsincybersecuritydomain.ACMComput.Surv., 52(6),oct2019. [77] M.Macas,C.Wu,andW.Fuertes.Asurveyondeeplearningforcybersecurity:Progress,challenges,andopportunities.Computer Networks,212:109032,2022. [78] V.Freitas.Parsifal,apr2023.[Online;accessedDec.2022]. [79] Y.Zhou,S.Liu,J.Siow,X.Du,andY.Liu.Devign:E￿ectivevulnerabilityidenti￿cationbylearningcomprehensiveprogramsemantics viagraphneuralnetworks.InH.Wallach,H.Larochelle,A.Beygelzimer,F.d'Alché-Buc,E.Fox,andR.Garnett,editors,Advancesin NeuralInformationProcessingSystems,volume32.CurranAssociates,Inc.,2019. [80] H.H.Nguyen,N.-M.Nguyen,C.Xie,Z.Ahmadi,D.Kudendo,T.-N.Doan,andL.Jiang. Mando:Multi-levelheterogeneousgraph embeddingsfor￿ne-graineddetectionofsmartcontractvulnerabilities,2022. [81] Y.Zhuang,S.Suneja,V.Thost,G.Domeniconi,A.Morari,andJ.Laredo. Softwarevulnerabilitydetectionviadeeplearningover disaggregatedcodegraphrepresentation,2021. [82] X.Cheng,H.Wang,J.Hua,M.Zhang,G.Xu,L.Yi,andY.Sui. Staticdetectionofcontrol-￿ow-relatedvulnerabilitiesusinggraph embedding.In201924thInternationalConferenceonEngineeringofComplexComputerSystems(ICECCS),pages41–50,2019. [83] G.Grieco,G.L.Grinblat,L.Uzal,S.Rawat,J.Feist,andL.Mounier.Towardlarge-scalevulnerabilitydiscoveryusingmachinelearning. 
InProceedingsoftheSixthACMConferenceonDataandApplicationSecurityandPrivacy,CODASPY’16,page85–96,NewYork,NY, USA,2016.AssociationforComputingMachinery. [84] J.Kronjee,A.Hommersom,andH.Vranken.Discoveringsoftwarevulnerabilitiesusingdata-￿owanalysisandmachinelearning.In Proceedingsofthe13thInternationalConferenceonAvailability,ReliabilityandSecurity,ARES’18,NewYork,NY,USA,2018.Association forComputingMachinery. [85] K.Cheng,G.Du,T.Wu,L.Chen,andG.Shi.Automatedvulnerablecodesmutationthroughdeeplearningforvariabilitydetection.In 2022InternationalJointConferenceonNeuralNetworks(IJCNN),pages1–8,2022. [86] T.Wu,L.Chen,G.Du,C.Zhu,N.Cui,andG.Shi.Inductivevulnerabilitydetectionviagatedgraphneuralnetwork.In2022IEEE25th InternationalConferenceonComputerSupportedCooperativeWorkinDesign(CSCWD),pages519–524,2022.
[87] R.Rabheru,H.Hanif,andS.Ma￿eis.Ahybridgraphneuralnetworkapproachfordetectingphpvulnerabilities.In2022IEEEConference onDependableandSecureComputing(DSC),pages1–9,2022. [88] Y.Wu,D.Zou,S.Dou,W.Yang,D.Xu,andHJin.Vulcnn:Animage-inspiredscalablevulnerabilitydetectionsystem.InProceedings ofthe44thInternationalConferenceonSoftwareEngineering,ICSE’22,page2365–2376,NewYork,NY,USA,2022.Associationfor ComputingMachinery. [89] Y.Li,S.Wang,andT.N.Nguyen. Vulnerabilitydetectionwith￿ne-grainedinterpretations. InProceedingsofthe29thACMJoint MeetingonEuropeanSoftwareEngineeringConferenceandSymposiumontheFoundationsofSoftwareEngineering,ESEC/FSE2021,page 292–303,NewYork,NY,USA,2021.AssociationforComputingMachinery. [90] W.An,L.Chen,J.Wang,G.Du,G.Shi,andD.Meng.Avdhram:Automatedvulnerabilitydetectionbasedonhierarchicalrepresentation andattentionmechanism.In2020IEEEIntlConfonParallel&DistributedProcessingwithApplications,BigData&CloudComputing, ACMComput.Surv.,Vol.37,No.4,Article111.Publicationdate:March2024.ASurveyofSourceCodeRepresentationsforMachineLearning-BasedCybersecurityTasks • 111:31 SustainableComputing&Communications,SocialComputing&Networking(ISPA/BDCloud/SocialCom/SustainCom),pages337–344, 2020. [91] T.Wu,L.Chen,G.Du,C.Zhu,andG.Shi.Self-attentionbasedautomatedvulnerabilitydetectionwithe￿ectivedatarepresentation. In2021IEEEIntlConfonParallel&DistributedProcessingwithApplications,BigData&CloudComputing,SustainableComputing& Communications,SocialComputing&Networking(ISPA/BDCloud/SocialCom/SustainCom),pages892–899,2021. [92] J.Zeng,X.Nie,L.Chen,J.Li,G.Du,andG.Shi.Ane￿cientvulnerabilityextrapolationusingsimilarityofgraphkernelofpdgs.In2020 IEEE19thInternationalConferenceonTrust,SecurityandPrivacyinComputingandCommunications(TrustCom),pages1664–1671,2020. [93] A.Watson,E.Ufuktepe,andK.Palaniappan.Detectingsoftwarecodevulnerabilitiesusing2dconvolutionalneuralnetworkswith programslicingfeaturemaps.In2022IEEEAppliedImageryPatternRecognitionWorkshop(AIPR),pages1–9,2022. 
[94] D.Zou,Y.Hu,W.Li,Y.Wu,H.Zhao,andH.Jin.mvulpreter:Amulti-granularityvulnerabilitydetectionsystemwithinterpretations. IEEETransactionsonDependableandSecureComputing,pages1–12,2022. [95] L.K.Shar,H.BengKuanTan,andL.C.Briand. Miningsqlinjectionandcrosssitescriptingvulnerabilitiesusinghybridprogram analysis.In201335thInternationalConferenceonSoftwareEngineering(ICSE),pages642–651,2013. [96] G.Yan,S.Chen,Y.Bail,andX.Li.Candeeplearningmodelslearnthevulnerablepatternsforvulnerabilitydetection?In2022IEEE 46thAnnualComputers,Software,andApplicationsConference(COMPSAC),pages904–913,2022. [97] S.Rasthofer,S.Arzt,andE.Bodden.Amachine-learningapproachforclassifyingandcategorizingandroidsourcesandsinks.012014. [98] L.Zhou,M.Huang,Y.Li,Y.Nie,J.Li,andY.Liu. Grapheye:Anovelsolutionfordetectingvulnerablefunctionsbasedongraph attentionnetwork.In2021IEEESixthInternationalConferenceonDataScienceinCyberspace(DSC),pages381–388,2021. [99] S.Suneja,Y.Zheng,Y.Zhuang,J.Laredo,andA.Morari.Learningtomapsourcecodetosoftwarevulnerabilityusingcode-as-a-graph, 2020. [100] Y.Ding,S.Suneja,Y.Zheng,J.Laredo,A.Morari,G.Kaiser,andB.Ray.Velvet:anovelensemblelearningapproachtoautomatically locatevulnerablestatements.In2022IEEEInternationalConferenceonSoftwareAnalysis,EvolutionandReengineering(SANER),pages 959–970,2022. [101] X.Duan,J.Wu,S.Ji,Z.Rui,T.Luo,M.Yang,andY.Wu. Vulsniper:Focusyourattentiontoshoot￿ne-grainedvulnerabilities. In ProceedingsoftheTwenty-EighthInternationalJointConferenceonArti￿cialIntelligence,IJCAI-19,pages4665–4671.InternationalJoint ConferencesonArti￿cialIntelligenceOrganization,72019. [102] H.Wu,Z.Zhang,S.Wang,Y.Lei,B.Lin,Y.Qin,H.Zhang,andX.Mao. Peculiar:Smartcontractvulnerabilitydetectionbasedon crucialdata￿owgraphandpre-trainingtechniques.In2021IEEE32ndInternationalSymposiumonSoftwareReliabilityEngineering (ISSRE),pages378–389,2021. 
[103] H.Wang,G.Ye,Z.Tang,S.H.Tan,S.Huang,D.Fang,Y.Feng,L.Bian,andZ.Wang.Combininggraph-basedlearningwithautomated datacollectionforcodevulnerabilitydetection.IEEETransactionsonInformationForensicsandSecurity,16:1943–1958,2021. [104] Z.Zhangetal. Reentrancyvulnerabilitydetectionandlocalization:Adeeplearningbasedtwo-phaseapproach. InProceedingsof the37thIEEE/ACMInternationalConferenceonAutomatedSoftwareEngineering,ASE’22,NewYork,NY,USA,2023.Associationfor ComputingMachinery. [105] Z.Liu,P.Qian,X.Wang,Y.Zhuang,L.Qiu,andX.Wang.Combininggraphneuralnetworkswithexpertknowledgeforsmartcontract vulnerabilitydetection.IEEETransactionsonKnowledgeandDataEngineering,35(2):1296–1310,2023. [106] L.K.Shar,L.C.Briand,andH.B.K.Tan.Webapplicationvulnerabilitypredictionusinghybridprogramanalysisandmachinelearning. IEEETransactionsonDependableandSecureComputing,12(6):688–707,2015.
[107] D.Zou,S.Wang,S.Xu,Z.Li,andH.Jin.μVulDeePecker:Adeeplearning-basedsystemformulticlassvulnerabilitydetection.IEEE TransactionsonDependableandSecureComputing,18(5):2224–2236,2021. [108] Z.Liu,P.Qian,X.Wang,L.Zhu,Q.He,andS.Ji.Smartcontractvulnerabilitydetection:Frompureneuralnetworktointerpretable graphfeatureandexpertpatternfusion,2021. [109] J.Gear,Y.Xu,E.Foo,P.Gauravaram,Z.Jadidi,andL.Simpson.Scevd:Semantic-enhancedcodeembeddingforvulnerabilitydiscovery. In2022IEEEInternationalConferenceonTrust,SecurityandPrivacyinComputingandCommunications(TrustCom),pages1522–1527, 2022. [110] Y.Wu,J.Lu,Y.Zhang,andS.Jin.Vulnerabilitydetectioninc/c++sourcecodewithgraphrepresentationlearning.In2021IEEE11th AnnualComputingandCommunicationWorkshopandConference(CCWC),pages1519–1524,2021. [111] W.Zheng,Y.Jiang,andX.Su.Vu1spg:Vulnerabilitydetectionbasedonslicepropertygraphrepresentationlearning.In2021IEEE 32ndInternationalSymposiumonSoftwareReliabilityEngineering(ISSRE),pages457–467,2021. [112] X.Cheng,H.Wang,J.Hua,G.Xu,andY.Sui. Deepwukong:Staticallydetectingsoftwarevulnerabilitiesusingdeepgraphneural network.ACMTrans.Softw.Eng.Methodol.,30(3),apr2021. [113] Y.Xue,J.Guo,L.Zhang,andH.Song.Messagepassinggraphneuralnetworksforsoftwaresecurityvulnerabilitydetection.In2022 InternationalConferenceonComputerNetwork,ElectronicandAutomation(ICCNEA),pages144–148,2022. [114] N.SarbakyshandZ.Wang. A1bert:Alanguage-agnosticgraphneuralnetworkmodelforvulnerabilitydetection. In20238th InternationalConferenceonDataScienceinCyberspace(DSC),pages205–212,2023. ACMComput.Surv.,Vol.37,No.4,Article111.Publicationdate:March2024.111:32 • BeatriceCasey,JoannaC.S.Santos,andGeorgePerry [115] Z.Chen,S.Kommrusch,andM.Monperrus.Neuraltransferlearningforrepairingsecurityvulnerabilitiesinccode.IEEETransactions onSoftwareEngineering,49(1):147–165,2023. 
[116] Z.Yu,C.Theisen,L.Williams,andT.Menzies.Improvingvulnerabilityinspectione￿ciencyusingactivelearning.IEEETransactions onSoftwareEngineering,47(11):2401–2420,2021. [117] A.BagheriandP.Hegedűs.Acomparisonofdi￿erentsourcecoderepresentationmethodsforvulnerabilitypredictioninpython.InAna C.R.Paiva,AnaRosaCavalli,PaulaVenturaMartins,andRicardoPérez-Castillo,editors,QualityofInformationandCommunications Technology,pages267–281,Cham,2021.SpringerInternationalPublishing. [118] M.FuandC.Tantithamthavorn.Linevul:Atransformer-basedline-levelvulnerabilityprediction.In2022IEEE/ACM19thInternational ConferenceonMiningSoftwareRepositories(MSR),pages608–620,2022. [119] B.Mosolygó,N.Vándor,G.Antal,P.Hegedűs,andR.Ferenc.Towardsaprototypebasedexplainablejavascriptvulnerabilityprediction model.In2021InternationalConferenceonCodeQuality(ICCQ),pages15–25,2021. [120] A.Mazuera-Rozo,A.Mojica-Hanke,M.Linares-Vásquez,andG.Bavota.Shallowordeep?anempiricalstudyondetectingvulnerabilities usingdeeplearning.In2021IEEE/ACM29thInternationalConferenceonProgramComprehension(ICPC),pages276–287,2021. [121] H.HanifandS.Ma￿eis.Vulberta:Simpli￿edsourcecodepre-trainingforvulnerabilitydetection.In2022InternationalJointConference onNeuralNetworks(IJCNN),pages1–8,2022. [122] H.K.Dam,T.Tran,T.Pham,S.W.Ng,J.Grundy,andA.Ghose. Automaticfeaturelearningforpredictingvulnerablesoftware components.IEEETransactionsonSoftwareEngineering,47(1):67–85,2021. [123] T.H.M.Le,D.Hin,R.Croft,andM.A.Babar. Deepcva:Automatedcommit-levelvulnerabilityassessmentwithdeepmulti-task learning. InProceedingsofthe36thIEEE/ACMInternationalConferenceonAutomatedSoftwareEngineering,ASE’21,page717–729. IEEEPress,2022. [124] T.H.M.Le,B.Sabir,andM.A.Babar. Automatedsoftwarevulnerabilityassessmentwithconceptdrift. In2019IEEE/ACM16th InternationalConferenceonMiningSoftwareRepositories(MSR),pages371–382,2019. [125] X.Li,L.Wang,Y.Xin,Y.Yang,andY.Chen. 
Automatedvulnerabilitydetectioninsourcecodeusingminimumintermediate representationlearning.AppliedSciences,10(5),2020. [126] V.Nguyen,T.Le,T.Le,K.Nguyen,O.DeVel,P.Montague,L.Qu,andD.Phung.Deepdomainadaptationforvulnerablecodefunction identi￿cation.In2019InternationalJointConferenceonNeuralNetworks(IJCNN),pages1–8,2019. [127] M.-J.Choi,S.Jeong,H.Oh,andJ.Choo.End-to-endpredictionofbu￿eroverrunsfromrawsourcecodevianeuralmemorynetworks, 2017. [128] J.Zhou,M.Pacheco,Z.Wan,X.Xia,D.Lo,Y.Wang,andA.E.Hassan.Findinganeedleinahaystack:Automatedminingofsilent vulnerability￿xes.In202136thIEEE/ACMInternationalConferenceonAutomatedSoftwareEngineering(ASE),pages705–716,2021. [129] G.Nguyen-Truong,H.J.Kang,D.Lo,A.Sharma,A.E.Santosa,A.Sharma,andM.Y.Ang.Hermes:Usingcommit-issuelinkingto detectvulnerability-￿xingcommits.In2022IEEEInternationalConferenceonSoftwareAnalysis,EvolutionandReengineering(SANER), pages51–62,2022.
[130] Y.Zhou,J.K.Siow,C.Wang,S.Liu,andY.Liu.Spi:Automatedidenti￿cationofsecuritypatchesviacommits.ACMTrans.Softw.Eng. Methodol.,31(1),sep2021. [131] K.Filus,M.Siavvas,J.Domańska,andE.Gelenbe.Therandomneuralnetworkasabondingmodelforsoftwarevulnerabilityprediction. InMariaCarlaCalzarossa,ErolGelenbe,KrysztofGrochla,RicardoLent,andTadeuszCzachórski,editors,Modelling,Analysis,and SimulationofComputerandTelecommunicationSystems,pages102–116,Cham,2021.SpringerInternationalPublishing. [132] H.Perl,S.Dechand,M.Smith,D.Arp,F.Yamaguchi,K.Rieck,S.Fahl,andY.Acar. Vcc￿nder:Findingpotentialvulnerabilitiesin open-sourceprojectstoassistcodeaudits. InProceedingsofthe22ndACMSIGSACConferenceonComputerandCommunications Security,CCS’15,page426–437,NewYork,NY,USA,2015.AssociationforComputingMachinery. [133] A.Xu,T.Dai,H.Chen,Z.Ming,andW.Li.Vulnerabilitydetectionforsourcecodeusingcontextuallstm.In20185thInternational ConferenceonSystemsandInformatics(ICSAI),pages1225–1230,2018. [134] N.ZiemsandS.Wu.Securityvulnerabilitydetectionusingdeeplearningnaturallanguageprocessing.InIEEEINFOCOM2021-IEEE ConferenceonComputerCommunicationsWorkshops(INFOCOMWKSHPS),pages1–6,2021. [135] D.Cao,J.Huang,X.Zhang,andX.Liu.Ftclnet:Convolutionallstmwithfouriertransformforvulnerabilitydetection.In2020IEEE 19thInternationalConferenceonTrust,SecurityandPrivacyinComputingandCommunications(TrustCom),pages539–546,2020. [136] X.Wang,S.Wang,P.Feng,K.Sun,S.Jajodia,S.Benchaaboun,andf.Geck.Patchrnn:Adeeplearning-basedsystemforsecuritypatch identi￿cation.InMILCOM2021-2021IEEEMilitaryCommunicationsConference(MILCOM),pages595–600,2021. [137] C.Mamede,E.Pinconschi,R.Abreu,andJ.Campos.Exploringtransformersformulti-labelclassi￿cationofjavavulnerabilities.In 2022IEEE22ndInternationalConferenceonSoftwareQuality,ReliabilityandSecurity(QRS),pages43–52,2022. 
[138] F.R.AbdulhamzaandR.J.S.Al-Janabi.Sqlinjectiondetectionusing2d-convolutionalneuralnetworks(2d-cnn).In2022International ConferenceonDataScienceandIntelligentComputing(ICDSIC),pages212–217,2022. [139] M.MimuraandY.Suga.Filteringmaliciousjavascriptcodewithdoc2veconanimbalanceddataset.In201914thAsiaJointConference onInformationSecurity(AsiaJCIS),pages24–31,2019. ACMComput.Surv.,Vol.37,No.4,Article111.Publicationdate:March2024.ASurveyofSourceCodeRepresentationsforMachineLearning-BasedCybersecurityTasks • 111:33 [140] X.Zhouetal.Securitycoderecommendationsforsmartcontract.In2023IEEEInternationalConferenceonSoftwareAnalysis,Evolution andReengineering(SANER),pages190–200,2023. [141] Z.Liuetal.Softwarevulnerabilitydetectionwithgptandin-contextlearning.In20238thInternationalConferenceonDataSciencein Cyberspace(DSC),pages229–236,2023. [142] Y.Weietal. Vulrep:Vulnerabilityrepairbasedoninducingcommitsandfixingcommits. EURASIPJournalonWireless CommunicationsandNetworking,2023(1),2023. [143] H.V.Nguyen,J.Zheng,A.Inomata,andT.Uehara.Codeaggregategraph:Effectiverepresentationforgraphneuralnetworkstodetect vulnerablecode.IEEEAccess,10:123786–123800,2022. [144] Z.Li,D.Zou,S.Xu,H.Jin,Y.Zhu,andZ.Chen.Sysevr:Aframeworkforusingdeeplearningtodetectsoftwarevulnerabilities.IEEE TransactionsonDependableandSecureComputing,19(4):2244–2258,2022. [145] A.Mahyari. Ahierarchicaldeepneuralnetworkfordetectinglinesofcodeswithvulnerabilities. In2022IEEE22ndInternational ConferenceonSoftwareQuality,Reliability,andSecurityCompanion(QRS-C),pages1–7,2022. [146] R.Ferenc,P.Hegedűs,P.Gyimesi,G.Antal,D.Bán,andT.Gyimóthy.Challengingmachinelearningalgorithmsinpredictingvulnerable javascriptfunctions.In2019IEEE/ACM7thInternationalWorkshoponRealizingArtificialIntelligenceSynergiesinSoftwareEngineering (RAISE),pages8–14,2019. [147] A.D.Sawadogo,T.F.Bissyandé,N.Moha,K.Allix,J.Klein,L.Li,andY.L.Traon.Learningtocatchsecuritypatches,2020.
[148] I.ChowdhuryandM.Zulkernine.Usingcomplexity,coupling,andcohesionmetricsasearlyindicatorsofvulnerabilities.Journalof SystemsArchitecture,57(3):294–313,2011.SpecialIssueonSecurityandDependabilityAssuranceofSoftwareArchitectures. [149] L.Yang,X.Li,andY.Yu.Vuldigger:Ajust-in-timeandcost-awaretoolfordiggingvulnerability-contributingchanges.InGLOBECOM 2017-2017IEEEGlobalCommunicationsConference,pages1–7,2017. [150] L.Kumar,C.Hota,A.Mahindru,andL.B.M.Neti. Androidmalwarepredictionusingextremelearningmachinewithdi￿erent kernelfunctions.InProceedingsofthe15thAsianInternetEngineeringConference,AINTEC’19,page33–40,NewYork,NY,USA,2019. AssociationforComputingMachinery. [151] Z.Wang,J.Guo,andH.Li. Vulnerabilityfeatureextractionmodelforsourcecodebasedondeeplearning. In2021International ConferenceonComputerNetwork,ElectronicandAutomation(ICCNEA),pages21–25,2021. [152] M.Zagane,M.K.Abdi,andM.Alenezi. Deeplearningforsoftwarevulnerabilitiesdetectionusingcodemetrics. IEEEAccess, 8:74562–74570,2020.
[153] S.Ganesh,T.Ohlsson,andF.Palma.Predictingsecurityvulnerabilitiesusingsourcecodemetrics.In2021SwedishWorkshoponData Science(SweDS),pages1–7,2021. [154] T.-Y.Chong,V.Anu,andK.Z.Sultana.Usingsoftwaremetricsforpredictingvulnerablecode-components:Astudyonjavaandpython opensourceprojects.In2019IEEEInternationalConferenceonComputationalScienceandEngineering(CSE)andIEEEInternational ConferenceonEmbeddedandUbiquitousComputing(EUC),pages98–103,2019. [155] N.Medeiros,N.Ivaki,P.Costa,andM.Vieira.Vulnerablecodedetectionusingsoftwaremetricsandmachinelearning.IEEEAccess, 8:219174–219198,2020. [156] M.Hasan,Z.Balbahaith,andM.Tarique. Detectionofsqlinjectionattacks:Amachinelearningapproach. In2019International ConferenceonElectricalandComputingTechnologiesandApplications(ICECTA),pages1–6,2019. [157] M.-H.Tsaietal. Powerdp:De-obfuscatingandpro￿lingmaliciouspowershellcommandswithmulti-labelclassi￿ers. IEEEAccess, 11:256–270,2023. [158] C.Thapa,S.I.Jang,M.E.Ahmed,S.Camtepe,J.Pieprzyk,andS.Nepal.Transformer-basedlanguagemodelsforsoftwarevulnerability detection.InProceedingsofthe38thAnnualComputerSecurityApplicationsConference,ACSAC’22,page481–496,NewYork,NY,USA, 2022.AssociationforComputingMachinery. [159] Z.Tang,Q.Hu,Y.Hu,W.Kuang,andJ.Chen.Sevuldet:Asemantics-enhancedlearnablevulnerabilitydetector.In202252ndAnnual IEEE/IFIPInternationalConferenceonDependableSystemsandNetworks(DSN),pages150–162,2022. [160] S.Kim,J.Choi,M.E.Ahmed,S.Nepal,andH.Kim.Vuldebert:Avulnerabilitydetectionsystemusingbert.In2022IEEEInternational SymposiumonSoftwareReliabilityEngineeringWorkshops(ISSREW),pages69–74,2022. [161] S.Liu,G.Lin,L.Qu,J.Zhang,O.DeVel,P.Montague,andY.Xiang.Cd-vuld:Cross-domainvulnerabilitydiscoverybasedondeep domainadaptation.IEEETransactionsonDependableandSecureComputing,19(1):438–451,2022. 
[162] D.Hin,A.Kan,H.Chen,andM.A.Babar.Linevd:Statement-levelvulnerabilitydetectionusinggraphneuralnetworks.InProceedings ofthe19thInternationalConferenceonMiningSoftwareRepositories,MSR’22,page596–607,NewYork,NY,USA,2022.Associationfor ComputingMachinery. [163] S.Ndichu,S.Ozawa,T.Misu,andK.Okada.Amachinelearningapproachtomaliciousjavascriptdetectionusing￿xedlengthvector representation.In2018InternationalJointConferenceonNeuralNetworks(IJCNN),pages1–8,2018. [164] S.Liu,G.Lin,Q.-L.Han,S.Wen,J.Zhang,andY.Xiang.Deepbalance:Deep-learningandfuzzyoversamplingforvulnerabilitydetection. IEEETransactionsonFuzzySystems,28(7):1329–1343,2020. ACMComput.Surv.,Vol.37,No.4,Article111.Publicationdate:March2024.111:34 • BeatriceCasey,JoannaC.S.Santos,andGeorgePerry [165] N.Ashizawa,N.Yanai,J.P.Cruz,andS.Okamura.Eth2vec:Learningcontract-widecoderepresentationsforvulnerabilitydetectionon ethereumsmartcontracts.InProceedingsofthe3rdACMInternationalSymposiumonBlockchainandSecureCriticalInfrastructure,BSCI ’21,page47–59,NewYork,NY,USA,2021.AssociationforComputingMachinery. [166] R.Yan,X.Xiao,G.Hu,S.Peng,andY.Jiang.Newdeeplearningmethodtodetectcodeinjectionattacksonhybridapplications.Journal ofSystemsandSoftware,137:67–77,2018. [167] G.Lin,J.Zhang,W.Luo,L.Pan,O.DeVel,P.Montague,andY.Xiang.Softwarevulnerabilitydiscoveryvialearningmulti-domain knowledgebases.IEEETransactionsonDependableandSecureComputing,18(5):2469–2485,2021. [168] Z.Bilgin,M.A.Ersoy,E.U.Soykan,E.Tomur,P.Çomak,andL.Karaçay.Vulnerabilitypredictionfromsourcecodeusingmachine learning.IEEEAccess,8:150672–150684,2020. [169] H.Feng,X.Fu,H.Sun,H.Wang,andY.Zhang.E￿cientvulnerabilitydetectionbasedonabstractsyntaxtreeanddeeplearning.In IEEEINFOCOM2020-IEEEConferenceonComputerCommunicationsWorkshops(INFOCOMWKSHPS),pages722–727,2020. 
[170] M.Gu,H.Feng,H.Sun,P.Liu,Q.Yue,J.Hu,C.Cao,andY.Zhang.Hierarchicalattentionnetworkforinterpretableand￿ne-grained vulnerabilitydetection.InIEEEINFOCOM2022-IEEEConferenceonComputerCommunicationsWorkshops(INFOCOMWKSHPS),pages 1–6,2022. [171] Y.Mao,Y.Li,J.Sun,andY.Chen.Explainablesoftwarevulnerabilitydetectionbasedonattention-basedbidirectionalrecurrentneural networks.In2020IEEEInternationalConferenceonBigData(BigData),pages4651–4656,2020. [172] Y.He,H.Sun,andH.Feng.Ua-miner:Deeplearningsystemsforexposeunprotectedapivulnerabilityinsourcecode.In202012th InternationalConferenceonAdvancedComputationalIntelligence(ICACI),pages378–384,2020. [173] P.Momeni,Y.Wang,andR.Samavi.Machinelearningmodelforsmartcontractssecurityanalysis.In201917thInternationalConference onPrivacy,SecurityandTrust(PST),pages1–6,2019. [174] X.Yan,S.Wang,andK.Gai.Asemanticanalysis-basedmethodforsmartcontractvulnerability.In2022IEEE8thIntlConferenceon BigDataSecurityonCloud(BigDataSecurity),IEEEIntlConferenceonHighPerformanceandSmartComputing,(HPSC)andIEEEIntl
ConferenceonIntelligentDataandSecurity(IDS),pages23–28,2022. [175] H.Shi,R.Wang,Y.Fu,Y.Jiang,J.Dong,K.Tang,andJ.Sun.Vulnerablecodeclonedetectionforoperatingsystemthroughcorrelation- inducedlearning.IEEETransactionsonIndustrialInformatics,15(12):6551–6559,2019. [176] D.Coimbra,S.Reis,R.Abreu,C.Păsăreanu,andH.Erdogmus.Onusingdistributedrepresentationsofsourcecodeforthedetectionof csecurityvulnerabilities,2021. [177] J.-W.Liao,T.-T.Tsai,C.-K.He,andC.-W.Tien.Soliaudit:Smartcontractvulnerabilityassessmentbasedonmachinelearningandfuzz testing.In2019SixthInternationalConferenceonInternetofThings:Systems,ManagementandSecurity(IOTSMS),pages458–465,2019. [178] W.Zheng,A.O.AbdallahSemasaba,X.Wu,S.A.Agyemang,T.Liu,andY.Ge. Representationvs.model:Whatmattersmostfor sourcecodevulnerabilitydetection.In2021IEEEInternationalConferenceonSoftwareAnalysis,EvolutionandReengineering(SANER), pages647–653,2021. [179] X.Liu,X.Du,Q.Lei,andK.Liu.Multifamilyclassi￿cationofandroidmalwarewithafuzzystrategytoresistpolymorphicfamilial variants.IEEEAccess,8:156900–156914,2020. [180] M.Ceccato,C.D.Nguyen,D.Appelt,andL.C.Briand. So￿a:Anautomatedsecurityoracleforblack-boxtestingofsql-injection vulnerabilities.InProceedingsofthe31stIEEE/ACMInternationalConferenceonAutomatedSoftwareEngineering,ASE’16,page167–177, NewYork,NY,USA,2016.AssociationforComputingMachinery. [181] E.Spirin,E.Bogomolov,V.Kovalenko,andT.Bryksin. Psiminer:Atoolforminingrichabstractsyntaxtreesfromcode. In2021 IEEE/ACM18thInternationalConferenceonMiningSoftwareRepositories(MSR),pages13–17,2021. [182] U.Alon,M.Zilberstein,O.Levy,andE.Yahav.code2vec:Learningdistributedrepresentationsofcode,2018. [183] G.Buehrer,B.W.Weide,andP.A.G.Sivilotti.Usingparsetreevalidationtopreventsqlinjectionattacks.InProceedingsofthe5th InternationalWorkshoponSoftwareEngineeringandMiddleware,SEM’05,page106–113,NewYork,NY,USA,2005.Associationfor ComputingMachinery. 
[184] GuanjunLin,JunZhang,WeiLuo,LeiPan,andYangXiang.Poster:Vulnerabilitydiscoverywithfunctionrepresentationlearning fromunlabeledprojects.InProceedingsofthe2017ACMSIGSACConferenceonComputerandCommunicationsSecurity,CCS’17,page 2539–2541,NewYork,NY,USA,2017.AssociationforComputingMachinery. [185] D.G.FritzandR.G.Sargent.Anoverviewofhierarchicalcontrol￿owgraphmodels.InProceedingsofthe27thConferenceonWinter Simulation,WSC’95,page1347–1355,USA,1995.IEEEComputerSociety. [186] S.Sinha. Staticanddynamicanalysisofprogamsthatcontainarbitraryinterproceduralcontrol￿ow. PhDthesis,2002. Copyright- DatabasecopyrightProQuestLLC;ProQuestdoesnotclaimcopyrightintheindividualunderlyingworks;Lastupdated-2023-03-03. [187] J.Ferrante,K.J.Ottenstein,andJ.D.Warren.Theprogramdependencegraphanditsuseinoptimization.ACMTrans.Program.Lang. Syst.,9(3):319–349,jul1987. [188] D.GroveandC.Chambers.Aframeworkforcallgraphconstructionalgorithms.ACMTrans.Program.Lang.Syst.,23(6):685–746,nov 2001. [189] M.Weiser.Programslicing.IEEETransactionsonsoftwareengineering,(4):352–357,1984. ACMComput.Surv.,Vol.37,No.4,Article111.Publicationdate:March2024.ASurveyofSourceCodeRepresentationsforMachineLearning-BasedCybersecurityTasks • 111:35 [190] Y.Sui,X.Cheng,G.Zhang,andH.Wang.Flow2vec:Value-￿ow-basedprecisecodeembedding.Proc.ACMProgram.Lang.,4(OOPSLA), nov2020. [191] B.Ste￿en,J.Knoop,andO.Rüthing.Thevalue￿owgraph:Aprogramrepresentationforoptimalprogramtransformations,1990. [192] M.Zhang,Y.Duan,H.Yin,andZ.Zhao.Semantics-awareandroidmalwareclassi￿cationusingweightedcontextualapidependency graphs.Proceedingsofthe2014ACMSIGSACConferenceonComputerandCommunicationsSecurity,2014. [193] Y.Zhang,X.Yu,Z.Cui,S.Wu,Z.Wen,andL.Wang.Everydocumentownsitsstructure:Inductivetextclassi￿cationviagraphneural networks,2020. [194] T.XuandP.Zhou. Featureextractionforpayloadclassi￿cation:Abytepairencodingalgorithm. In2022IEEE8thInternational ConferenceonComputerandCommunications(ICCC),pages1–5,2022. 
[195] QuocV.LeandTomasMikolov.Distributedrepresentationsofsentencesanddocuments,2014. [196] Meiliana,S.Karim,H.L.H.S.Warnars,F.L.Gaol,E.Abdurachman,andB.Soewito.Softwaremetricsforfaultpredictionusingmachine learningapproaches:Aliteraturereviewwithpromiserepositorydataset.In2017IEEEInternationalConferenceonCyberneticsand ComputationalIntelligence(CyberneticsCom),pages19–23,2017. [197] I.Santos,F.Brezo,X.Ugarte-Pedrero,andP.G.Bringas.Opcodesequencesasrepresentationofexecutablesfordata-mining-based unknownmalwaredetection.InformationSciences,231:64–82,2013.DataMiningforInformationSecurity. [198] P.Kruchten.TheRationalUni￿edProcess:AnIntroduction.Addison-Wesley,2009. [199] Z.Chen,S.Kommrusch,andM.Monperrus.Neuraltransferlearningforrepairingsecurityvulnerabilitiesinccode.IEEETransactions onSoftwareEngineering,49(1):147–165,January2023. [200] Y.Chen,Z.Ding,L.Alowain,X.Chen,andD.Wagner. Diversevul:Anewvulnerablesourcecodedatasetfordeeplearningbased vulnerabilitydetection,2023.
[201] R.L.Russell,L.Kim,L.H.Hamilton,T.Lazovich,J.A.Harer,O.Ozdemir,P.M.Ellingwood,andM.W.McConley. Automated vulnerabilitydetectioninsourcecodeusingdeeprepresentationlearning,2018. [202] T.BolandandP.E.Black.Juliet1.1c/c++andjavatestsuite.Computer,45(10):88–90,2012. [203] LászlóSzekeres,MathiasPayer,TaoWei,andDawnSong. Sok:Eternalwarinmemory. In2013IEEESymposiumonSecurityand Privacy,pages48–62,2013. [204] J.K.Siow,S.Liu,X.Xie,G.Meng,andY.Liu.Learningprogramsemanticswithcoderepresentations:Anempiricalstudy.In2022IEEE InternationalConferenceonSoftwareAnalysis,EvolutionandReengineering(SANER),pages554–565,2022. [205] YuhuangHu,AdrianHuber,JithendarAnumula,andShih-ChiiLiu.Overcomingthevanishinggradientprobleminplainrecurrent networks,2019. [206] P.RunesonandM.Höst.Guidelinesforconductingandreportingcasestudyresearchinsoftwareengineering.EmpiricalSoftw.Engg., 14(2):131–164,apr2009. [207] J.Cohen.Acoe￿cientofagreementfornominalscales.EducationalandPsychologicalMeasurement,20(1):37–46,1960. [208] A.Kanuparthi,J.Rajendran,andR.Karri.Controllingyourcontrol￿owgraph.In2016IEEEInternationalSymposiumonHardware OrientedSecurityandTrust(HOST),pages43–48,2016. [209] M.Papachristou.Softwareclusteringswithvectorsemanticsandthecallgraph.InProceedingsofthe201927thACMJointMeeting onEuropeanSoftwareEngineeringConferenceandSymposiumontheFoundationsofSoftwareEngineering,pages1184–1186,Tallinn Estonia,August2019.ACM. ACMComput.Surv.,Vol.37,No.4,Article111.Publicationdate:March2024.
2403.11254 Efficiently Detecting Reentrancy Vulnerabilities in Complex Smart Contracts ZEXUWANG, SunYat-senUniversity,ChinaandPengChengLaboratory,China JIACHICHEN, SunYat-senUniversity,China YANLINWANG, SunYat-senUniversity,China YUZHANG, HarbinInstituteofTechnology,ChinaandPengChengLaboratory,China WEIZHEZHANG, HarbinInstituteofTechnology,ChinaandPengChengLaboratory,China ZIBINZHENG∗, SunYat-senUniversity,ChinaandGuangDongEngineeringTechnologyResearchCenter ofBlockchain,China Reentrancyvulnerabilityasoneofthemostnotoriousvulnerabilities,hasbeenaprominenttopicinsmart contractsecurityresearch.Researchshowsthatexistingvulnerabilitydetectionpresentsarangeofchallenges, especiallyassmartcontractscontinuetoincreaseincomplexity.Existingtoolsperformpoorlyintermsof efficiencyandsuccessfuldetectionratesforvulnerabilitiesincomplexcontracts. Toeffectivelydetectreentrancyvulnerabilitiesincontractswithcomplexlogic,weproposeatoolnamed SliSE.SliSE’sdetectionprocessconsistsoftwostages:WarningSearchandSymbolicExecutionVerification. InStageI,SliSEutilizesprogramslicingtoanalyzetheInter-contractProgramDependencyGraph(I-PDG)of thecontract,andcollectssuspiciousvulnerabilityinformationaswarnings.InStageII,symbolicexecutionis employedtoverifythereachabilityofthesewarnings,therebyenhancingvulnerabilitydetectionaccuracy. SliSEobtainedthebestperformancecomparedwitheightstate-of-the-artdetectiontools.ItachievedanF1 scoreof78.65%,surpassingthehighestscorerecordedbyanexistingtoolof9.26%.Additionally,itattaineda recallrateexceeding90%fordetectionofcontractsonEthereum.Overall,SliSEprovidesarobustandefficient methodfordetectionofReentrancyvulnerabilitiesforcomplexcontracts. CCSConcepts:•Softwareanditsengineering→Softwareverificationandvalidation;Softwaretesting anddebugging. 
AdditionalKeyWordsandPhrases:Reentrancydetection,Programslicing,Symbolicexecution ACMReferenceFormat: ZexuWang,JiachiChen,YanlinWang,YuZhang,WeizheZhang,andZibinZheng.2024.EfficientlyDetecting ReentrancyVulnerabilitiesinComplexSmartContracts.Proc.ACMSoftw.Eng.1,FSE,Article8(July2024), 21pages.https://doi.org/10.1145/3643734 ∗CorrespondingAuthor Authors’addresses:ZexuWang,SunYat-senUniversity,Zhuhai,ChinaandPengChengLaboratory,Shenzhen,China, wangzx97@mail2.sysu.edu.cn;JiachiChen,SunYat-senUniversity,Zhuhai,China,chenjch86@mail.sysu.edu.cn;Yanlin Wang,SunYat-senUniversity,Zhuhai,China,wangylin36@mail.sysu.edu.cn;YuZhang,HarbinInstituteofTechnology, Harbin,ChinaandPengChengLaboratory,Shenzhen,China,yuzhang@hit.edu.cn;WeizheZhang,HarbinInstituteof Technology,Harbin,ChinaandPengChengLaboratory,Shenzhen,China,wzzhang@hit.edu.cn;ZibinZheng,SunYat- senUniversity,Zhuhai,ChinaandGuangDongEngineeringTechnologyResearchCenterofBlockchain,Zhuhai,China, zhzibin@mail.sysu.edu.cn. Permissiontomakedigitalorhardcopiesofpartorallofthisworkforpersonalorclassroomuseisgrantedwithoutfee providedthatcopiesarenotmadeordistributedforprofitorcommercialadvantageandthatcopiesbearthisnoticeand thefullcitationonthefirstpage.Copyrightsforthird-partycomponentsofthisworkmustbehonored.Forallotheruses, contacttheowner/author(s). ©2024Copyrightheldbytheowner/author(s). ACM2994-970X/2024/7-ART8 https://doi.org/10.1145/3643734 Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024. 4202 raM 71 ]ES.sc[ 1v45211.3042:viXra8:2 ZexuWang,JiachiChen,YanlinWang,YuZhang,WeizheZhang,andZibinZheng 1 INTRODUCTION As decentralized applications (DApps) become more versatile in functionality, the complexity of the underlying contract logic has correspondingly increased. 
This escalation in complexity poses significant challenges for existing tools in detecting vulnerabilities within complex con- tracts.Reentrancyvulnerabilitiesareoneofthemostnotorioustypes[5].StartingfromtheDAO Reentrancyattack[11]in2016thatcauseda$150millionlossindigitalassets,Reentrancyattacks onblockchaincontinuetooccur.Concurrently,manyacademicstudiesandtoolsfordetecting Reentrancyvulnerabilitieshaveemerged. VariousapproacheshavebeenemployedtodetectReentrancyvulnerabilities,includingsymbolic execution [9, 25, 27, 36, 42], fuzz testing [8, 21, 28, 32], static analysis [1, 12, 40], and formal verification[10,12,37].However,mostexistingtoolshavebeenevaluatedonrelativelysimple contractdatasets,e.g.,theSmartBugsDataset [31],lackingexperimentalassessmentsonreal-world complex contracts. To ascertain their efficacy in complex DApps, Zheng et al. [45] curated a datasetthatcomprises895vulnerabilities.Thesevulnerabilitieswereobtainedfrom1,322open- sourceDAppauditreportsprovidedby30blockchainsecuritycompanies,covering25typesof vulnerabilities.ComparedtotheSmartBugsdataset [31],ithasapproximately25timestheaverage linesofcodeand30timestheaveragefunctioncount.Thestudyalsoevaluatedfivestate-of-the-art vulnerabilitydetectiontools[1,8,9,12,37]onthisdataset.Theexperimentalresultsrevealedthat mosttoolshadalowsuccessdetectionrate(lessthan30%),especiallyforReentrancyvulnerabilities
(lessthan11%).Thisemphasizesthenecessityoffocusingondetectingreal-worldvulnerabilitiesin complexcontractsratherthansimpletoycontractsinfutureresearch. Real-worldReentrancyattackeventsusuallyinvolvecomplexfunctioncallrelationships.How- ever,mostexistingtoolsfocusonlyonthesecurityofasinglefunctionwithinthecontract,which isinsufficienttoguaranteetheoverallcontractsafety.CompositionalReentrancyvulnerabilities of smart contracts are introduced by cross-contract interactions, such as cross-function Reen- trancy [34] and cross-contract Reentrancy [35], presenting considerable challenges. Effective detectionofcompositionalReentrancyvulnerabilitiesrequiresathoroughanalysisofcontract interactionsandasystematicexaminationofdataandcontrolflowtransitions.Incorporatingpro- gramsemanticscouldenhancetheaccuracyofidentifyingcriticaldataandcontrolflows,thereby improvingcompositionalReentrancyvulnerabilitydetectioneffectiveness.Additionally,thecom- putationofdynamicjumpaddressesposesanothermajorchallenge,oftenresultinginincomplete ControlFlowGraph(CFG)paths.Despitecomplexfunctioncallrelationshipsmaygeneratenumer- ousexecutionpaths,manycurrenttoolspredominantlyrelyonstaticstacksimulationforpath recovery.Thismethodstruggleswithdynamicjumpaddresses,leavingCFGpathsincompleteand compromisingtheaccuracyofsymbolicexecution.Therefore,theanalysisofcompositionalReen- trancyvulnerabilityandCFGpathrecoveryarethekeychallengesincomplexcontractReentrancy vulnerabilitydetection. Toaddressthesechallenges,weproposetheSliSEmethod,whichcombinessymbolicexecution with program slicing to detect Reentrancy vulnerabilities in complex contracts. The detection process is divided into two stages: Warning Search and Symbolic Execution Verification. In the WarningSearchstage,programslicingisperformedbasedonprogramdependenciestosearchand extractcriticalpaths.Subsequently,intheSymbolicExecutionVerificationstage,thereachability ofthesecriticalpathsisfurtherverifiedtoachieveefficientReentrancyvulnerabilitydetection. 
Throughprogramdependencyanalysis,theI-PDG(Inter-contractProgramDependencyGraph)of contractisconstructed,whichcanprovidecross-contractdataandcontroldependencies.Combined with our Reentrancy vulnerability slicing standards, the I-PDG is sliced to prune paths. SliSE analyzesprogramdependencyamongcriticalinstructionstatements(suchasrequire,assert,etc.) tomodelthesemantics,thenidentifiesdataandcontrolflowtransitions.Withthisinformation, itverifieswhetherthesesemanticsadheretothesecureC-E-I(Check->Effect->Interaction)pattern. Thispatternmandatestimelystateupdatesbeforeinteractingwithexternalcontracts,maintaining atomicityoftransactionsandpreventingReentrancy(seesection2fordetails).Ifdeviationsfrom thispatternaredetected,SliSEgeneratesawarningthatincludesthecorrespondingfunctionand locationinformation.IntheSymbolicExecutionVerificationstage,SliSEemploystheCFGpath recoveryalgorithmtorecoverexecutionpathsandgatheressentialpathconstraints.Finally,it provesthereachabilityofthepathandtheexistenceofvulnerabilitiesthroughconstraintsolving. WeevaluatedSliSE’sperformancebycomparingthedetectionresultswitheightstate-of-the-art tools,analyzingitsperformanceindetectingReentrancyvulnerabilitiesfromcomplexcontracts. TheresultsshowthatSliSEperformswellindetectingcomplexcontractReentrancyvulnerabilities, withtheF1scoreof78.65%,significantlyexceedingthehighestscoreof9.26%achievedbytheeight state-of-the-arttools.ToassessitseffectivenessindetectingReentrancyvulnerabilitiesonEthereum, weusedtwopubliclyavailabledatasets[31,46].SliSEdemonstratedanoutstandingrecallrate of92.68%andmaintainedthehighestF1scorescomparedtotheexistingtools.Throughablation experimentsforeachstage,weconfirmedthecriticalimportanceofeachstageintheoverallprocess. Notably,precisepathpruningduringstageIisinstrumentalforefficientvulnerabilitydetection. 
ThispathpruningprocesscontributedtoasignificantincreaseintheF1score,elevatingitfrom 6.59%to78.65%.Additionally,withthestageIIofsymbolicexecutionverification,weobserveda substantialreductioninfalsepositives,resultinginaprecisionimprovementfrom47.30%to72.16%. Overall,SliSEprovidesarobustandefficientmethodtoefficientlydetectReentrancyvulnerabilities incomplexcontracts. Themaincontributionsofourworkareasfollows: (1) Weproposeanapproachthatcombinesprogramslicingandsymbolicexecutiontoefficiently detectReentrancyvulnerabilitieswithincomplexcontracts.Bypruningandverifyingthe reachabilityofcriticalpaths,weachieveeffectivedetection. (2) WedesigntheSliSEtool,whichefficientlydetectscomplexcontractsReentrancyvulnerabili- ties.Comparativeexperimentswithstate-of-the-arttoolssubstantiateitseffectivenessand efficiency. (3) WehavemadeourSliSEtool’ssourcecodeandexperimentaldatasetpubliclyavailableat https://github.com/SliSE-SC/SliSE. Thepaperisorganizedasfollows.InSection2,weprovideessentialbackgroundandhighlight
challengesindetectingcomplexcontractReentrancyvulnerabilitiesthroughmotivatingexamples. Section3outlinestheworkflowandtechnicaldetailsofSliSE.Weevaluatetheperformanceand efficiency of SliSE in Section 4, while Section 5 discusses existing tool capabilities in complex contractReentrancyvulnerabilitydetectionandthreatsanalysis.Section6summarizesrelated work,andSection7concludesthispaper. 2 BACKGROUNDANDMOTIVATION 2.1 ReentrancyVulnerabilityDetection C-E-I(Check->Effect->Interaction)isacriticalsecurityprogrammingpatterninsmartcontracts.It requiresthatexternalinteractionsshouldonlyoccurafterallpreconditionsarecheckedandinternal stateupdatesareeffected[14,19].Forexample,whentransferringtokenstoanexternalcontract (Interactionphase),itisessentialtoperformuserbalancechecks(Checkphase)andupdate balances(Effectphase)first.IncaseofanattackerattemptingReentrancy,thetransactionwillrevert duetothebalancecheckfailureinCheckphase,effectivelypreventingReentrancy.Evaluating Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.8:4 ZexuWang,JiachiChen,YanlinWang,YuZhang,WeizheZhang,andZibinZheng whether token transfers in the contract adhere to the secure C-E-I (Check->Effect->Interaction) patternispivotalinReentrancyvulnerabilitydetection. ReentrancyvulnerabilitiestypicallystemfromtheviolationofthesecureCheck->Effect->Interaction (C-E-I) pattern, with attackers exploiting the smart contract’s fallback mechanism. 
The Check- >Effect->Interaction (C-E-I)securitypatternnecessitatesthatstatechangesoccurbeforecontract interactions,ensuringtimelystateupdates.Smartcontracts’fallbackmechanismactivatesauto- maticallyuponreceivingEther(nativetokens).Sometokenstandards,suchasERC-777[20]and ERC-1155[38],emulatethenativetoken’sfallbackmechanismusinghookfunctions.Ifthestate isnotpromptlyupdated,transferringtokenstotheattackercantriggertheirfallbackfunction, enablingthemtohijackthelogicandexploitReentrancyvulnerabilities.Thisdisruptstheatomicity ofthetransaction,leadingtomultipleexecutionsfromasinglefunctioncall. Figure1displaysaReentrancyvulnerability.Thewithdraw functionallowsuserstotransfer anamountoftokens.InL5,itfirstverifieswhethertheuser’sbalance(userBalance)exceedsthe transferredamount(Checkphase).Iftheconditionismet,executionproceeds,otherwise,the transactionisreverted.L6facilitatesthetransferof_amount tokenstomsg.sender (Interaction phase).Followingthis,L7updatesthebalanceofmsg.sender (EffectPhase).Thissequenceinthe withdrawfunctionviolatesthesecureCheck->Effect->Interaction(C-E-I)transferpattern,which underminestransactionatomicityandtriggersReentrancyattacks.ThisvulnerabilityarisesinL6, whereatransferisinitiatedbymsg.sender (externaluser)usingcall.value.Sincethedeductionof theattacker’sbalanceisdeferredtoL7,thebalancecheck(Checkphase)inL5remainssatisfied, facilitatingfurthertokentransferstotheattacker.Exploitingthis,anattackercanrepeatedlytrigger thewithdrawfunctionthroughthefallbackmechanism.Torectifythis,thetransferlogicshould followtheCheck->Effect->Interaction(C-E-I)pattern,withtheexecutionsequenceasL5->L7->L6. Thebalanceupdate(L7)mustprecedethetransfer(L6).Thisarrangementensuresthatevenifthe attackerreentersthewithdraw functionviathefallbackmechanism,thereentryfailsduetoan unsatisfiedbalancecheckcondition(L5). 1 contract ContractA { 2 ... 
3 function withdraw(address _contractB, uint _amount) public { 4 userBalance = _contractB.getBalance(msg.sender); 5 require(userBalance >= _amount); 6 msg.sender.call{value:_amount}(""); 7 _contractB.reduceBalance(msg.sender,_amount); 8 } 9 } 10 contract ContractB { 11 mapping(address => uint) balances; 12 ... 13 function getBalance(address _address) public view returns (uint) { 14 return balances[_address]; 15 } 16 17 function reduceBalance(address _address, uint amount) public { 18 balances[_address] = balances[_address] - amount; 19 } 20 } Fig.1. TheexampleofReentrancy Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.EfficientlyDetectingReentrancyVulnerabilitiesinComplexSmartContracts 8:5 Compared to native token (ETH), ERC tokens utilize hook functions to simulate the native token’sfallbackmechanism.Accordingtodifferentimplementationmethodsoffallbackmechanism, Reentrancyvulnerabilitiesaredividedintothefollowingtwocategories: • ReentrancywithETH:TransferringETH,usingthenativefallbackmechanismtoimplement callback; • Reentrancy with ERC Token: Transferring ERC tokens (derivative tokens), the hook functionisusedtoimplementcallback. Existinghookfunctionexamples,suchasERC-777andERC-1155,aredesignedtoaddresstheprob- lemofassetlock-inatreceivingaddresses.ERC-777tokensrequireinvokingtheERC777TokensSender (a hook function) before state updates [20]. If these updates are not synchronized, transaction atomicitycanbecompromised,resultinginReentrancyvulnerabilities.Figure2illustratesacross- contractReentrancyincidentonCreamFinance,wheretheattackoccurredonAugust30,2021.
TheattackerexploitedtheERC-777token’shookfunctiontoborrowdigitalcurrenciestwice,even thoughtheyhadpledgedassetsonlyonce.InthecriticalexecutionlogicoftheborrowFreshfunction, itcallsborrowAllowed tocheckuserstatus(Checkphase)inL11,ERCToken(borrower).transfer(...) executestheERC-777tokentransfer(Interactionphase)inL13,andupdatestheuser’sbalance (Effect phase) in L15. This violation of the Check->Effect->Interaction (C-E-I) transfer pattern resultedinCreamFinancelosingapproximately$18.8millionworthofdigitalassets.Notably,even thoughtheborrowInternal functioninL5hadaReentrancylock(nonReentrant),itstillfellprey toaReentrancyattack.Thisunderscoresthatasinglefunction’sReentrancylockcannot preventcross-contractReentrancyattacks.TheexecutionofborrowfunctioninL2involves multipleinternalfunctioncalls,eachwithdistinctpurposes.Itisessentialtoextractcriticalsemantic featurestoascertainwhetherthecontractabidesbythesecureCheck->Effect->Interaction(C-E-I) pattern. This emphasizes the significance of compositional security analysis when identifying Reentrancyvulnerabilitiesincomplexcontracts. 2.2 MotivationExamplesandChallenges 2.2.1 Semantic Modeling of Complex Contracts. The complexity of function call relationships complicates semantic analysis. Figure 3 depicts the key function calls during the execution of theborrow functioninFigure2.Thisfigurehighlightstheintricatelogicembeddedwithinthis function’s implementation. 
Due to the modular design of smart contract functions, there’s an inherentcomplexityinfunctioncalls.Throughsemanticanalysis,wediscernthattheoveralltoken transferprocessviolatestheCheck->Effect->Interaction(C-E-I)pattern.Crucially,theInteraction phase isexecutedbeforetheEffectphase,andthissequenceisapivotalfactorfortriggering Reentrancy.Whenthestateisnotupdatedpromptly,theconditioncheckedintheCheckphase becomesinvalid.Simultaneously,externalcontractcallbacks(attacker)intercepttheexecution, introducingmaliciouscodeforReentrancy.Forexample,inCreamFinance’scase,tokentransfers usingtheERC-777standardallowthereceivers(attacker)toexecutespecificlogicfromthehook functionuponreceivingERC-777tokens,whichisanimportantreasonforReentrancyattacks. Motivation:Existingapproachesusuallylackcomprehensivecompositionalanalysisofsmart contractprogramsemantics,typicallynarrowingtheirfocustoindividualfunctions.Thislimitation mayresultinmissedvulnerabilitiesandfalsepositivesindetectionresults.AsshowninFigure3, theexecutionoftheborrowfunctioninvolvescomplexcross-contractinteractionsandfunctioncalls. ToolssuchasSailfish[1]andMythril[9]struggletoanalyzedataflowduringthesecross-contract interactions,leadingtomissedvulnerabilitydetection.Ontheotherhand,staticanalysistools likeSlither[12]primarilyensureindividualfunctionsecurity,neglectingcompositionalsecurity Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.8:6 ZexuWang,JiachiChen,YanlinWang,YuZhang,WeizheZhang,andZibinZheng 1 contract CreamFinance_Reentrancy{ 2 function borrow(uint borrowAmount) returns (uint) { 3 return borrowInternal(borrowAmount); 4 } 5 function borrowInternal(uint borrowAmount) internal nonReentrant returns (uint) { 6 uint error = accrueInterest(); 7 ... 8 return borrowFresh(msg.sender, borrowAmount); 9 } 10 function borrowFresh(address payable borrower, uint borrowAmount) internal returns (uint) { 11 uint allowed = comptroller.borrowAllowed(address(this), borrower, borrowAmount); 12 ... 
13 ERCToken(borrower).transfer(borrowAmount); 14 ... 15 comptroller.borrowVerify(address(this), borrower, borrowAmount); 16 return uint(Error.NO_ERROR); 17 } 18 } Fig.2. TheborrowfunctioncausingCreamFinance’sReentrancy analysis.Thislimitationstemsfromtheirinabilitytosystematicallyanalyzestateinterdependencies amongmultiplefunctions,leadingtoasignificantnumberoffalsepositivesinthedetectionresults. Challenge:Theprimarychallengeinsemanticmodelingliesincomprehensivelyanalyzingpro- gramdependenciesandpreciselycapturingdatadependenciesduringcontractinteractions. Phase1: Check External Contract fail getCashPrior EIP20Interface.balanceOf Internal Function Call ... ComptrollerInterface.borrowAllowed Cross-Contract Function Call getBlockNumber borrowBalanceStoreInternal Phase2: Interaction borrowFresh doTransferOut ERC777_Hook_Function CErc20.borrow CToken.borrowInternal ComptrollerInterface.borrowVerify accrueInterest Phase3: Effect Reentrant Fig.3. Executionphasesandfunctioncallsoftheborrowfunction 2.2.2 CFGPathRecovery. Accuratelycollectingandpropagatingpathconstraintsarecrucialfor achievingeffectivecross-contractsymbolicexecution,directlyimpactingvulnerabilitydetection precision.InFigure4,theessentialCFGpathsoftheborrowfunctioninFigure2aredepicted.The bluearrowrepresentsthedynamicjumpedge(OrphanJump1)[10],wherethejumpaddressof 1OrphanJumplacksaprecedingPUSHopcode,makingitsjumptargetaddresschallengingtocomputeimmediately.
Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.EfficientlyDetectingReentrancyVulnerabilitiesinComplexSmartContracts 8:7 theJUMP opcodecannotbedeterminedthroughstaticanalysis.Theblueovalblockrepresents OrphanJumpaddressblock,whileredfilledsquaresrepresentblockscontainingcross-contract interaction.AsshowninFigure4,theexecutionprocessoftheborrowfunctioninvolvesnumerous dynamicjumpedgecalculationsandcross-contractinteractions.ExistingtoolssuchasMythril[9], Sailfish[1],andManticore[27]encounterchallengesduetodifficultiesincomputingdynamic jumpaddressesandalackofsupportforcross-contractanalysis.Consequently,theirCFGpaths areincomplete,resultinginissuessuchasmissingpathconstraintsandincompletepathtraversal. Thisleadstoasignificantnumberoffalsenegativesinthedetectionresults. 0x6E0 0x6F3 0x7A1 0x7B3 0x815 0x850 Push Jump 0x7C5 0x828 Orphan Jump 0x85D 0x858 0x83A Basic Block 0x85E Orphan Jump Address Block 0x95E Cross-contract Interactions Block 0x274 Fig.4. ControlFlowGraph(CFG)oftheborrowfunction. Motivation:Recoveringdynamicjumpedgesiscrucialforcollectingpathconstraintsandcross- contractanalysis.AlthoughtraditionalsymbolicexecutionprovesbeneficialinCFGpathrecovery, itisstillchallengingindynamicjumpedgecalculations.Thislimitationhinderstheircapabilityto detectvulnerabilitiesincomplexcontracts.Whilesometoolsemploystaticstackemulationfor CFGrecovery,theystrugglewithdynamicjumpedges,leavingcontractCFGpathsincomplete.Fur- thermore,effectivecross-contractanalysisoncompleteCFGpathsisessentialforefficientsymbolic execution.Duringcross-contractinteractions,contextswitchingensuressmoothcollectionand propagationofpathconstraints.However,state-of-the-artsymbolicexecutiontoolslikeSailfish[1] andMythril[9]frequentlyfacedifficultiesincross-contractinteractions,primarilyduetopath losses,leadingtotimeoutsandexacerbatedpathexplosion. Challenge:EfficientlydeterminingdynamicjumpaddressesandrecoveringtheCFGpathpresent significantchallenges. 
3 METHODOLOGY Inthissection,weintroducetheworkflowanddelveintothetechnicaldetailsofSliSE. 3.1 Overview TheSliSEmethodefficientlydetectscomplexcontractReentrancyvulnerabilitiesbycombining programslicingwithsymbolicexecutionverification.Theprocess,asillustratedinFigure5,takes sourcecodeasinput,reportsthepresenceofvulnerabilitiesandtheircorrespondinglocations. It consists of two main stages: Warning Search and Symbolic Execution Verification. In Stage I, SliSEanalyzesprogramdependenciesthroughtheAbstractSyntaxTree(AST)toconstructthe Inter-contractProgramDependencyGraph(I-PDG)ofthecontract.Itthenperformsslicinganalysis basedonReentrancyvulnerabilitycharacteristicstoidentifysuspiciousvulnerabilities’functions andlocations,whicharesubsequentlyreportedaswarninginformation.InStageII,criticalpaths containing warning information are extracted, followed by symbolic execution to validate the Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.8:8 ZexuWang,JiachiChen,YanlinWang,YuZhang,WeizheZhang,andZibinZheng Stage Ⅰ: Stage Ⅱ: Warnings Search Symbolic Execution Verification Symbolic Execution AST Smart I-PDG Analyser Warnings Path Detection Contract Recovering Results Slicing SMT Slices Fig.5. TheworkflowofSliSE reachabilityofthesepaths.Thiscombinedstaticanalysisandsymbolicexecutiontoeffectively detectReentrancyvulnerabilitiesincomplexcontracts. 3.2 StageI:WarningsSearch Toimprovesearchefficiency,SliSEperformsprogramslicinganalysisontheglobalprogramdepen- dencyofcontracts,pruningirrelevantpathstoenhancedetectionefficiency.Initially,SliSEcompiles the contract source code to obtain the corresponding Abstract Syntax Tree (AST) information. 
ByanalyzingtheAST,itconstructstheInter-contractProgramDependencyGraph(I-PDG)forthe contract.CombinedwiththecharacteristicsofReentrancyvulnerabilities,programslicingisthen appliedtotheI-PDG.Thisprocessinvolvesverifyingwhetherthecorrespondingcodesemantics adheretothesecureCheck->Effect->Interaction(C-E-I)pattern.Theresultsofthisanalysisyield warninginformationaboutReentrancyvulnerabilities,whichservesasinputforStageII. Algorithm1:ConstructingI-PDG Input:Inter-contractControlFlowGraph(I-CFG) Output:Inter-contractProgramDependencyGraph(I-PDG) 1 FunctionConstructing_I-PDG(I-CFG): 2 I-PDG←Initializeanemptygraph; 3 foreachnodeinI-CFGdo 4 I-PDG.add_node(node); 5 foreachsuccessorinnode.successorsdo 6 I-PDG.add_node(successor); 7 I-PDG.add_control_edge(node,successor); 8 if modify(node,successor.variable)then 9 I-PDG.add_data_edge(node,successor); 10 end 11 end 12 end 13 returnI-PDG; 3.2.1 ConstructingInter-contractProgramDependencyGraph. Foracomprehensiveanalysisof smartcontract program semantics,we constructthe Inter-contractProgramDependencyGraph (I-PDG)byanalyzingtheoveralldependencyrelationshipswithinthecontract’sAST.Whilethe
existingInter-contractControlFlowGraph(I-CFG)[26]transformscross-contractcallsintoglobal jumpsbetweenstatementblocks,offeringanoverviewoftheglobalcontrolflow,itfaceschallenges Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.EfficientlyDetectingReentrancyVulnerabilitiesinComplexSmartContracts 8:9 in achieving a comprehensive global data flow analysis due to intricate execution paths and frequentcross-contractinteractions.Buildinguponthecontract’sI-CFG,SliSEperformsprogram dependencyanalysisbetweeneachstatementblockthroughtheASTtocreatetheInter-Contract ProgramDependencyGraph(I-PDG).InthisI-PDG,nodesrepresentfundamentalstatements,and edgessignifyprogramdependencyrelationshipsinvolvingbothcontrolanddatadependencies. Algorithm1outlinestheconstructionprocessoftheI-PDG.Inthisalgorithm,L3–L4iteratethrough eachnodeintheI-CFGandaddittotheI-PDG,whileL5–L11addthesucceedingnodeofeachto theI-PDG.Simultaneously,weanalyzerelationshipsbetweendatadefinitionsandusage,aswellas controlrelationshipsbetweennodes,introducingdatadependencyedgesandcontroldependency edges. Figure6outlinestheprocessofconstructingtheInter-contractProgramDependencyGraph(I-PDG) forcontractsinFigure1.UtilizingthefoundationalnodesfromtheI-CFG[26]asastartingpoint, wegeneratesubgraphsofControlDependencyGraph(CDG)andDataDependencyGraph(DDG) basedonthecontroldependencyrelationshipsandthedatadependencyrelationshipsbetweenthe respectivenodes.Ourmaincontributionliesinachievingacomprehensiveprogramdependency analysisforcross-contractscenarios.Globalprogramdependenciesofferricherinformationfor analyzing global variables, user input, data flow during cross-contract interactions, and more. AsshowninFigure6,thebluesolidlinerepresentstheexistingInter-contractCallDependencies, enablingdirectdataflowanalysisincross-contractscenarios.Thishelpsusunderstandtheimpact offunctionsinvokedacrosscontractsonglobalstatevariablesandhowthisinfluencespreads. 
3 13 4 5 14 7 6 3 16 3 3 1413 4 5 1 C7 DG 1413 4 7 65 1413 4 7 65 16 7 6 3 16 16 17 17 17 I-CFG 13 4 5 I-PDG I-PDG 14 16 7 6 Control Dependency Control & Data Dependency 17 DDG Data Dependency Inter-contract Call Dependency Fig.6. I-PDGconstructionforexampleinFigure1 3.2.2 Slicing & Analysing. To prune irrelevant paths, we have devised specific slicing criteria basedontheuniqueattributesofthesevulnerabilities.Thesecriteriaenablepreciseanalysisby extracting essential code sections that exhibit Reentrancy vulnerability traits. Leveraging the globalprogramdependency,weexplorerelationshipsamongcross-contractcalladdresses,input variables,anddataflowsduringcross-contractinteractions.Theserelationshipsarecrucialfora comprehensiveReentrancyvulnerabilityanalysis.However,sincethesedependencyrelationships arenotexclusivetosmartcontracts,uninformedanalysiscouldleadtosignificantfalsepositives.To addressthis,weproposevulnerabilityslicingrulesalignedwithReentrancyattackcharacteristics. These rules streamline the analysis of code relevant to Reentrancy vulnerabilities, enhancing detectionefficiency.Furthermore,byconsideringthecharacteristicsofReentrancyvulnerabilities andthefallbackmechanism,weestablishslicingcriteriaspecifictotwotypes:ETHandERCtokens. ThesecriteriaefficientlytargetandanalyzecoderelevanttoReentrancyvulnerabilities. • RuleforReentrancywithETH: Backward_Slicing[User_Input_Address.call.value()] Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.8:10 ZexuWang,JiachiChen,YanlinWang,YuZhang,WeizheZhang,andZibinZheng • RuleforReentrancywithERCToken: Backward_Slicing[ERC(User_Input_Address).call_function()] RuleforReentrancywithETH usedforvulnerabilitiesoriginatingfromETHtransfers,primarily utilizingthecall.value().Consequently,ourfocusisonthedynamicaccountaddress(userinputused asaddress)triggeringthecall.value()function.EmployingbackwardslicingontheI-PDG,weisolate nodeswithdependencies,concentratingonsectionspertinenttotheReentrancyvulnerability. 
RuleforReentrancywithERCTokenfocusesonReentrancyoriginatingfromERCtokentransfers. Itcentersonthefunctioncalltriggeredbythedynamicaddresscontract(userinputusedascontract address),servingastheentrypoint.ByconductingbackwardslicingoftheI-PDG,weretainnodes with dependencies in the slice. For example, in Figure 2, L13 serves as the slicing entry point. Thislinecallsthefunctionfromthedynamicaddresscontract(ERCToken(borrower)).Giventhe uncertain logic of the external contract, attackers can exploit it by injecting malicious code to achieveReentrancy. AsdepictedinFigure7,thisdemonstrationfollowstheRuleforReentrancywithETH standard forthecodeinFigure1.Inthisexample,msg.sender.call.valueinL6meetsthecriteriaforslicing, serving as the entry point for conducting a backward slice. The resulting sliced code snippet isdisplayedontherightside.L3,L4,andL5containthecodewithinthebackwardslice,while L13andL14areretainedinthesliceduetotheirinter-contractcalldependencies.Thisretention
preservestheintegrityofdependencyrelationships.OurReentrancyvulnerabilityprogramslicing criterionfacilitatesfocusedanalysisofcriticalcodesegmentswhilepreservingrelevantdependency relationships.Thisapproachenablesefficientandprecisecodeanalysis,mitigatingtheimpactof unrelatedcodeonvulnerabilitydetection’seffectivenessandaccuracy. 3 3 13 4 5 13 4 5 14 7 6 Slicing 14 7 6 Control Dependency 16 16 Data Dependency 17 17 Control & Data Dependency I-PDG Slice Inter-contract Call Dependency Fig.7. SlicingprocessforexampleinFigure1 Tomodelandanalyzesemantics,wescrutinizetheprogramdependenciesofcriticalinstruction- relatedstatements.Forinstance,inFigure1,wecanextracttheCheckphasewithinthecodeusing conditionalcheckslikerequireandassert,alongwithcontroldependencyanalysis.Thepresenceof cross-contractfunctioncallsindicatestheInteractionphase,whilestatementsrelatedtovariable updatesintheCheckphase correspondtotheEffectphase.SliSEemploysthisinformationto slicethecontract’sI-PDG,assessingitsadherencetothesecuredevelopmentpatternCheck->Effect- >Interaction(C-E-I),andgeneratingpertinentwarningmessages. Figure8illustratestheanalysisprocedureoftheslicecorrespondingtothecodepresentedin Figure 1. Within the slice code, the require() in L5 represents the Check phase, involving the balances[user]variable.Additionally,msg.sender.call.value()inL6signifiestheInteractionphase, aligningwiththeslicingruleoftheRuleforReentrancywithETH.Itisnoteworthythatnoactions aretakentoalterthebalances[user]variable(modificationoccursinL7),indicatingthattheEffect phase follows the Interaction phase. 
This observation plays a crucial role in identifying the Reentrancyvulnerability.Whenanattackerreentersthroughtheexternalcontract,thestateof theCheckphaseremainsunchanged(balances[user]doesnotdecrease).Consequently,itcanbe Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.EfficientlyDetectingReentrancyVulnerabilitiesinComplexSmartContracts 8:11 identifiedasaReentrancyvulnerability.Wecompilethelocationdetailsofthevulnerablefunction togeneratewarninginformation. Contract A Contract B L3: function withdraw(address _contractB... L13: function getBalance(address _address)... L4: userBalance = _contractB.getBalance... L5: require(userBalance >= _amount); Phase 1: Check L14: return balances[_address]; Phase 2: Interaction L6: msg.sender.call{value:_amount}... L11: mapping(address => uint) ... L7: _contractB.reduceBalance... L16: function reduceBalance... Phase 3: Effect L17: balances[_address]... Exit Exit Control Dependency Data Dependency Control & Data Dependency Inter-contract Call Dependency In slice Not in slice Fig.8. AnalysisofProgramSlicingforFigure1 3.3 StageII:SymbolicExecutionVerification To reduce false positives and ensure reliable vulnerability detection, SliSE employs symbolic executionforpathreachabilityverification.Initially,SliSEaddressesthechallengeofcalculating dynamicjumpaddresses(OrphanJump)byusingAlgorithm2combinedwithSSA2(StaticSingle Assignment)toensurecompleteCFGpaths.BasedonthewarninginformationfromStageI,critical pathsaretraversed,andpathconstraintsarecollected.TheseconstraintsarestoredintheSymbolic Register forlateraccessandretrieval.TheyarethenvalidatedusingtheZ3-solver toconfirmpath reachabilityand,consequently,theexistenceofvulnerabilities. 3.3.1 PathRecovering. 
ToefficientlyrecoverCFGpaths,weutilizeconstantpropagationanalysis combinedwithSSAtodeterminethetargetaddressesofOrphanJumps.Initially,thebytecode isdividedintomultipleblocks,andsomeblocksconnectionarerecoveredthroughstaticstack emulation.However,staticstackemulationcanonlyrecoverjumpedgesforPushJumps3,andit cannotascertainthetargetofanOrphanJumpwithinthecurrentblock.Thislimitationresultsin theinabilitytorecovertheedgesofOrphanJumps,leavingtheCFGincomplete. Figure9illustratesanexampleofrecoveringtheCFGwithSSA.IntheincompleteCFGobtained throughstaticstackemulation,aJUMP instructionexistswithinBlock_ID6,butthisblocklacks successorblocks,creatingwhatisknownasanOrphanJump.Theinherentlimitationofstatic stackemulationinaddressingtheOrphanJumpissueliesinitsincompleteanalysisofvariable transitions within the current block. It cannot effectively analyze the flow of values along the execution path of the current block, making it incapable of solving the Orphan Jump problem effectively.ByleveragingthepropertyofSSA,whichensuresthateachvariableisassignedonly once,wecanperformglobalvaluepropagationanalysisalongtheexecutionpathoftheJumpblock. TheSSArepresentationclearlyhighlightstherelationshipsbetweenvariabledefinitionsandtheir useswithineachblock.If,priortotheJumpinstruction,thereexistsavariablethatisdefinedbut 2SSAisapropertyofintermediatelanguagesthatmandateseachvariabletobeassignedonlyonce,enablingfeatureslike constantpropagationanalysis. 3PushJumpisimmediatelyprecededbyaPUSHopcode,makingitsjumptargetaddresseasilycalculable. Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.8:12 ZexuWang,JiachiChen,YanlinWang,YuZhang,WeizheZhang,andZibinZheng
notused,thatvariablerepresentsthestacktopvalueandservesasthetargetoftheOrphanJump.If nostacktopvalueisfoundinthecurrentblock,thesearchcontinuesintheprecedingblocks.For instance,inBlock_ID6,whereaJUMP instructionispresent,thestacktopvaluecanbelocatedin itspredecessorblock(Block_ID3).Thisapproachensuresthepreciserecoveryoftargetaddresses forOrphanJumps,obtainingacompleteCFG. Block_ID: 1 Block_offset: 0x0c ... 24: %6=EQ(#4,#5) 25: %7=PUSH(0x21) 27: JUMPI(#6,#7) Block_ID: 2 Block_ID: 3 Block_offset: 0x1c Block_offset: 0x21 29: %8=PUSH(0x00) 34: %10=PUSH(0x27) 31: %9=#8 36: %11=PUSH(0x35) 32: REVERT 38: JUMP(#11) Block_ID: 5 Block_ID: 6 Block_offset: 0x29 Block_offset: 0x35 ... 54: %16=PUSH(0x01) 46: %15=PUSH(0x35) … 48: JUMPI(#14,#15) 131: JUMP(IN Block_ID=3) Block_ID: 4 Block_offset: 0x27 40: STOP Fig.9. RecoveringtheCFGwithSSA Tosearchthetargetaddresses,weutilizeconstantpropagationanalysiswithSSAtocalculatethe dynamictargetaddressesofOrphanJumps.Algorithm2outlinestheprocessforCFGpathrecovery. ThisprocessinvolvestransformingthebytecodewithineachblockintoSSAform,ensuringthat eachnumericvalueisassignedonlyonce.AswetraversetheblocksoftheincompleteCFG,we updatetheSSAglobalvariables(ssaVariables)basedonoperationsthatinvolvevariableassignment oraccess(e.g.,PUSH,DUP,POP,SWAP)[39].Forexample,inL8–L9,weidentifythejumptarget bysearchingforunusedvariablesinthecurrentblock.Unusedvariablesrefertothosedefined intheblockbutarenotutilized.Ifsuchavariableexistswithinthecurrentblock,itbecomesthe jumptarget.However,ifitdoesnot,weperformasearchwithintheprecedingblocksalongthe CFGpathwherethecurrentblockislocated.ThefindUnusedVar functionfacilitatesthisiterative searchprocess.Thisalgorithmensuresaccuratepropagationofvaluesalongtheexecutionpath, facilitatingthepreciserecoveryoftargetaddressesforOrphanJumpsembeddedwithintheCFG. 3.3.2 SymbolicallyVerifyingPathFeasibility. 
ToensuretheaccuracyofReentrancyvulnerability detection, we perform a reachability analysis of the warning path information. While Stage I conductsprogramslicinganalysistogeneratewarninginformationrelatedtoviolatingtheCheck- >Effect->Interaction(C-E-I)pattern,itlacksverificationofthereachabilityofthewarningpaths. Specifically,contractPathProtectiveTechniques(PPTs)[40],suchasmutexlocksandpermission checks,canlimittheoccurrenceofReentrancyvulnerabilities.Staticanalysistoolsoftenstruggle toaccuratelyidentifytheseprotectivetechniques,leadingtonumerousfalsepositives.Toaddress this,SliSEcollectswarningpathconstraintsandvalidatestheirreachability.Whenitencounters assignmentsoraccessesinvolvingsymbolicvalues,itusesthesymbolicregister forsymbolicstate manipulation.ConsideringtheconditionsforReentrancyvulnerabilityandthecharacteristicsof smartcontracts,itcollectsconstraintsandsendsthemtoZ3-solver forconstraintcalculationto provepathreachability,significantlyenhancingtheaccuracyofvulnerabilitydetection. Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.EfficientlyDetectingReentrancyVulnerabilitiesinComplexSmartContracts 8:13 Toensuretheintegrityofpathconstraints,wemanagesymbolicexpressionsinakey-valuepair formatforfurtheranalysis.Byexaminingrelevantoperationsrelatedtostatestorageandutilizing Z3libraryfunctions,westoresymbolicexpressionsaskey-valuepairsintheSymbolicRegister. Theaccessandstorageofsymbolicstatesarecrucialforcollectingpathconstraints.Byquerying theSymbolicRegister,SliSEdetermineswhetherthecontractaddressinvolvedinexternalcontract functioncallsisasymbolicvalue,assessingwhetherthelogicofthecalledcontractiscontrolledby externalusers.Thisalsofacilitatesquickassessmentofwhetheracontractmightbesusceptible tohijackingbymaliciouscallbackfunctions,streamliningtheefficientpropagationanalysisof cross-contractpathconstraintsinvariouscontractcontextsduringsymbolicexecutionverification. 
Algorithm2:RecoveryCFG Input:CFG Output:ReconstructedCFG 1 FunctionRecoveryCFG(CFG): 2 foreachblockinCFG.blocksdo 3 jumpTarget←findUnusedVar(block); 4 CFG.add_edge(block,blockAt(jumpTarget)); 5 end 6 FunctionfindUnusedVar(block): 7 foreachpreBlockinblock.predecessorsdo 8 StackTopValue←getUnusedVariables(preBlock); 9 returnStackTopValue; 10 end 4 EVALUATION Inthissection,weevaluatedSliSE’sperformancebycomparingthedetectionresultswitheight state-of-the-arttoolsandanalyzingtheimpactofeachstageontheoveralldetectionprocess.We willaddressthefollowingresearchquestions: RQ1. HoweffectiveisSliSEindetectingcomplexcontractsReentrancyvulnerabilities? RQ2. HoweffectiveisSliSEindetectingReentrancyvulnerabilitiesonEthereum? RQ3. WhatistheimpactofpruninginStageI? RQ4. WhatistheimpactofsymbolicexecutionverificationinStageII? 4.1 ExperimentalSetup TheexperimentswereconductedonacomputerrunningUbuntu18.04.1LTS,equippedwitha 16-coreIntel(R)Xeon(R)Gold5217processor.Wesetuptheexperimentsbydownloadingimagesor
manuallyconfiguring.Weuseddefaultparametersandatimebudgetof300secondstoensurethat theexperimentsdidnotexcessivelyconsumetime,aligningwiththeapproachsuggestedin[46]. Following the tool selection in [45], the experiments use a set of analysis tools, including Slither[12],Mythril[9],Securify[37],Smartian[8],andSailfish[1].Additionally,tofacilitatea comparativeanalysisoftheperformanceofexistingsymbolicexecutiondetectiontools,wealso integratedthreesymbolicexecutiontools,i.e.,Oyente[25],Osiris[36],andManticore[27]. TheexperimentaldatasetsincludeDB1,DB2,andDB3.DB1issourcedfromopen-sourceDApp auditing projects, representing off-chain versions with complex contract logic. DB2 comprises realon-chaincontracts,mostofwhichhavebeenlabeledaspositivebyexistingtools,posinga significantdetectionchallenge.DB3isthesimplestandmostwidelyutilizeddataset.Smartcontract Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.8:14 ZexuWang,JiachiChen,YanlinWang,YuZhang,WeizheZhang,andZibinZheng ofthesedatasetsalloriginatefromreal-worldproductionenvironments,comparedandanalyzed throughPrecision($\frac{TP}{TP+FP}$),Recall($\frac{TP}{TP+FN}$),andF1score($\frac{2\cdot precision\cdot recall}{precision+recall}$).DB1wasemployedto answerRQ1,RQ3,andRQ4,whileDB2andDB3wereutilizedtoanswerRQ2,RQ3,andRQ4. DB1. ComplexContractDataset [45].Zhengetal.compiledacomplexcontractdatasetthaten- compasses895vulnerabilitiesfrom1,322open-sourceDAppauditprojectsprovidedby30 blockchainsecuritycompanies,withatotalof81positivelabelsforReentrancyvulnerabilities. DB2. EthereumContractDataset [46].Zhengetal.usedexistingdetectiontoolstoanalyze230,548 verifiedcontractsfromEtherscanandobtainedthisdatasetthroughmanualinspection.The datasetincludes21,212contractsidentifiedaspositiveReentrancyvulnerabilitiesusingsix Reentrancydetectiontools,with41ofthecontractsmanuallyverifiedastruepositives. DB3. SmartBugsDataset [31].TheSmartBugsincludes143contracts,ofwhich31arelabeledwith Reentrancyvulnerabilities.
Researchindicatesthatmanuallylabelingsmartcontractsishighlyerror-prone[46],andachiev- ing precise labeling for all Ethereum smart contracts is a formidable challenge. To ensure the accuracyofourexperimentalanalysis,weconductedRQ2experimentsusingtheDB2andDB3 datasets.Thesedatasetshavebeenpresentedattopsoftwareengineeringconferencesandare widelyrecognizedwithintheacademiccommunity,servingasareliablesourceofgroundtruthfor Ethereumcontracts.Wehavecomparedkeyattributesamongthesedatasets,asdepictedinTable1. Notably,theaveragelinesofcode(Loc)inDB1areapproximately24.9timesgreaterthaninDB3 and3.4timesgreaterthaninDB2.Similarly,theaveragenumberoffunctionsinDB1is29.9timeshigherthanin DB3and3.6timeshigherthaninDB2.Thesefindingsunderscorethehighcomplexity oftheDB1datasetfromtherealproductionenvironment,andwewilldelveintoaquantitativeanalysis ofDB1’scomplexityinSection5. Table1. Statisticsfordifferentdatasets Dataset Loc #ofSubcontracts #ofFunctions #P #N DB1 1812.5 12 197.4 81 814 DB2 534.5 6.0 54.2 41 21171 DB3 72.8 1.5 6.6 31 112 4.2 EffectivenessofDetectingReentrancyforComplexContracts Table2. 
StatisticsofDetectionResultsbyDifferentTools Tool Mythril Securify Slither Oyente Osiris Manticore Smartian Sailfish Ours Dataset DB1 DB2 DB3 DB1 DB2 DB3 DB1 DB2 DB3 DB1 DB2 DB3 DB1 DB2 DB3 DB1 DB2 DB3 DB1 DB2 DB3 DB1 DB2 DB3 DB1 DB2 DB3 #TP 5 8 13 0 31 29 8 36 30 0 32 28 0 34 29 0 0 0 0 7 19 0 26 25 70 38 30 #FP 22 15492 51 0 2356 44 140 18346 67 0 481 39 1 476 31 0 22 0 0 15 9 2 2270 31 27 31 14 #FN 76 33 18 81 10 2 73 5 1 81 9 3 81 7 2 81 41 31 81 34 12 81 15 6 11 3 1 #TN 792 5679 61 814 18815 68 676 2825 45 814 20690 73 813 20695 81 814 21149 112 814 21156 103 812 18901 81 787 21140 98 P 18.52% 0.05% 20.31%0.00% 1.30% 39.73%5.41% 0.20% 30.93%0.00% 6.24% 41.79%0.00% 6.67% 48.33%0.00%0.00%0.00%0.00%31.82%67.86%0.00% 1.13% 44.64%72.16%55.07%68.18% R 6.17% 19.51%41.94%0.00%75.61%93.55%9.88%87.80%96.77%0.00%78.05%90.32%0.00%82.93%93.55%0.00%0.00%0.00%0.00%17.07%61.29%0.00%63.41%80.65%86.42%92.68%96.77% F 9.26% 0.10% 27.37%0.00% 2.55% 55.77%6.99% 0.39% 46.88%0.00%11.55%57.14%0.00%12.34%63.74%0.00%0.00%0.00%0.00%22.22%64.41%0.00% 2.23% 57.47%78.65%69.09%80.00% *PrepresentsPrecision,RrepresentsRecall,andFrepresentsF1score. ToevaluateSliSE’scapabilityindetectingReentrancyvulnerabilitieswithincomplexcontracts, weconductedexperimentscomparingitwitheightstate-of-the-arttoolsusingtheDB1dataset.The results,presentedinTable2,demonstratethedetectionperformanceofdifferenttools.Itisevident thatexistingtoolsachieveamaximumF1scoreoflessthan10%,andmostofthemcorrectlydetect0
Reentrancyvulnerabilities.Thisunderscoresthelimitationsofexistingtoolsineffectivelydetecting vulnerabilitiesinreal-world,complexDApps.Incontrast,SliSEperformthebest,achievingan Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.EfficientlyDetectingReentrancyVulnerabilitiesinComplexSmartContracts 8:15 impressiveF1scoreof78.65%onDB1,asignificantimprovementcomparedtothehighestF1score of9.26%achievedbyexistingtools. Table3comparesthedetectiontimesofdifferenttools.SliSEtakesanaverageof25.26secondsfor DB1detection.WhilethisishigherthanstaticanalysistoolslikeSlither,itfallswithinamoderate rangecomparedtomostsymbolicexecutiontools. Theanalysisofexperimentalresultsrevealsthatmanyexistingsymbolicexecutiontoolsface challengesinpreciselysearchingpathswhiledetectingvulnerabilitiesincomplexcontracts.Thisis duetotheirlimitedstateexplorationcapabilities,resultinginincompletepathidentificationand theomissionofnumerouspaths.Thisinefficiencyslowsdownprogramexecutionandgenerates anotablenumberoffalsenegatives.Therefore,precisepathpruningisessentialforefficientand effectivesymbolicexecutionvulnerabilitydetection. Table3. Comparisonofaveragedetectiontimes Dataset Mythril Securify Slither Oyente Osiris Manticore Smartian Sailfish Ours DB1 157.17 26.89 7.11 8.76 14.15 189.8 238.79 7.04 25.26 DB2 276.36 101.18 4.97 13.53 154.54 290.77 298.38 1.19 6.01 DB3 230.49 84 4.37 9.33 141.84 248.63 264.11 5.32 1.94 AnswertoRQ1:SliSEoutperformedothertoolsindetectingvulnerabilitieswithincomplex contracts,achievinganimpressiveF1scoreof78.65%,significantlysurpassingthemaximumof9.26% achievedbyothertools.TheprecisepathpruningtechniquesemployedbySliSEareinstrumental inensuringeffectiveandefficientvulnerabilitydetection. 
4.3 EffectivenessofDetectingReentrancyonEthereumContracts ToevaluateSliSE’seffectivenessindetectingReentrancyvulnerabilitiesonEthereumcontracts,we conductedexperimentswithDB2andDB3.Thesedatasetsoriginatedfrompreviousresearchon smartcontractsecurity[31,46],containing68,610contractsthatweremanuallyannotatedand widelyrecognizedasthegroundtruthforReentrancyvulnerabilitiesonEthereumcontracts. Weconductedcomparativeexperimentsusingeightexistingtools,asshowninTable2.SliSE’s superiority is evident, achieving detection recall rates exceeding 90%. On DB2, it significantly outperforms existing tools, demonstrating robust detection capabilities for complex contracts. Notably,apartfromManticore,existingtoolsexhibitrelativelyaverageperformanceonDB3,with mostF1scoresreaching45%orhigher.However,whendealingwiththemorecomplexDB2,the vulnerabilitydetectioncapabilitiesofexistingtoolsnotablydecline.Themajorityofthesetoolsyield F1scoresbelow15%duetoahighnumberoffalsepositives.Furthermore,inthetimecomparison forDB2andDB3inTable3,symbolicexecutiontoolslikeMythrilandManticorestillstrugglewith pathexplosionissues,withaveragedetectiontimesexceeding230seconds.Incontrast,SliSE’s detectiontimeiscomparabletothatofstaticanalysistools,averaginglessthan7seconds. 1 function withdrawAll() external { 2 uint256 amount = address(this).balance; 3 bool success = msg.sender.call{value: amount}(""); 4 require(success, "Transfer failed"); 5 } Fig.10. FalsepositiveofReentrancywithouteconomicloss Through analysis, we have identified that false positives (FP) are primarily associated with specificsemanticdesigns,whichrefertoprogramimplementationstailoredforspecificscenarios. 
Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.8:16 ZexuWang,JiachiChen,YanlinWang,YuZhang,WeizheZhang,andZibinZheng OneexampleisthatReentrancycanoccurwithoutcausingeconomiclosses,meaningthatReentrancy logicexistsinthecontractbutdoesnotresultinanylossestothevictim.Thesesituationsoften arisefromspecificprogramdesignchoicesthatpreventReentrancybutrequirecontextualanalysis. SliSEdoesnotaccountforthesespecialsemanticdesigns,leadingtofalsepositives.InFigure10, thecodesatisfiestheconditionsforaReentrancyvulnerability,allowingReentrancythroughthe call.value()inL3.However,atthispoint,thecontract’sbalancehasalreadybeenfullytransferred, causingthetransactiontorevertduetoinsufficientbalanceduringtheReentrancy.Inreality,no economiclossoccurs.Thelackofspecificsemanticanalysissignificantlyresultsinfalsepositives inthedetectionoutcomes. AnswertoRQ2:SliSE’sadvantageofdetectingReentrancyvulnerabilitiesinEthereumsmart contractsisevidentwhencomparedtoexistingtools.Itconsistentlyachievesarecallrateexceeding 90%andmaintainshigherF1scorescomparedtothebestresultsachievedbyexistingtools. 4.4 ImpactofPruninginStageI InStageI,SliSEperformsprogramslicingtoanalyzethecontract’sI-PDG,pruningirrelevantcode segmentsfortargetedanalysis.ToevaluatetheimpactofpruninginStageIonoveralldetection,we conductedcomparativeexperimentsintwomodes:StageII andStageI&IIcombined (representing
withoutandwithStageI).BycomparingresultswithandwithoutpruninginStageI,weassessed itseffect,asshowninTable4.Theimpactofpruningisminimalwhendealingwithdatasetsof lowcomplexity,suchasDB2andDB3.PruninginStageIresultsinlessthana10%increaseinthe overallF1scoreonDB3.However,forcomplexcontractvulnerabilitydetectiononDB1,StageI pruningsignificantlyenhancesaccuracy.TheF1scoreimprovesfrom6.59%to78.65%,achievinga 11timesincreaseinresults. Table4. StatisticsofDetectionResultsbyDifferentTools Dataset DB1 DB2 DB3 Tools StageI StageII StageI&II StageI StageII StageI&II StageI StageII StageI&II #TP 70 3 70 38 16 38 30 25 30 #FP 78 7 27 146 11 31 56 1 14 #FN 11 78 11 3 25 3 1 5 1 #TN 736 807 787 21025 21160 21140 56 112 98 Precision 47.30% 30.00% 72.16% 20.65% 59.26% 55.07% 34.88% 96.15% 68.18% Recall 86.42% 3.70% 86.42% 92.68% 39.02% 92.68% 96.77% 83.33% 96.77% F1 61.14% 6.59% 78.65% 33.78% 47.06% 69.09% 51.28% 89.29% 80.00% WealsoevaluatedtheimpactofpruninginStageIontheoveralldetectiontime,asshownin Figure11.Pruningsignificantlyreducestheexecutiontime.Forcomplexcontractvulnerability detection(DB1),theaveragedetectiontimedecreasesfrom157.17sto25.26s,greatlyimproving theefficiencyofdetectingvulnerabilitiesincomplexcontracts.Thisreductioninexecutiontime highlightstheimportanceofprecisepruninginStageIforefficientvulnerabilitydetection. Fig.11. Impactofpruning Fig.12. Impactofsymbolicexecutionverification Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.EfficientlyDetectingReentrancyVulnerabilitiesinComplexSmartContracts 8:17 AnswertoRQ3:PruninginStageIsignificantlyimprovesdetectionaccuracy,boostingtheF1 scorefrom6.59%to78.65%.Additionally,theprecisepathpruninginStageIensurestheefficiency ofvulnerabilitydetection. 
4.5 ImpactofSymbolicExecutionVerificationinStageII InStageII,SliSEusessymbolicexecutiontovalidatethereachabilityofsuspiciousvulnerability paths,reducingfalsepositivesinthedetectionresults.Toevaluatetheimpactofsymbolicexecution verificationinStageIIforoveralldetection,weconductedcomparativeexperimentsbycomparing theresultsbetweenStageI andStageI&II modes(representingwithoutandwithStageII).Table4 presentsthestatisticsforStageIandStageI&II.ThedataclearlyshowthatStageIIsymbolic executionverificationsignificantlyreducesthenumberoffalsepositives(FPs)intheresults.For ReentrancyvulnerabilitydetectiononDB1onlywithStageI,therewere78FPs.Whencombined withStageIIsymbolicexecutionverification,thenumberofFPsnotablydecreased,resultingina substantialincreaseinprecisionfrom47.30%to72.16%.Thishighlightsthatthesymbolicexecution verificationinStageIIensuresmoreprecisevulnerabilitydetectionresults. WealsoanalyzedthetimeoverheadassociatedwithStageII’ssymbolicexecutionverification. AsshowninFigure12,theoverheadintroducedbysymbolicexecutionverificationislessthan7 secondsonDB2,DB3,withtheaveragedetectiontimeremainingat25.26secondsforDB1.This timeinvestmentissignificantlylowercomparedtotheruntimeofmostsymbolicexecutiontools andissuitableforbatchdetectionpurposes. AnswertoRQ4:SymbolicexecutionverificationinStageIIsignificantlyreducesFPs,boosting precisionfrom47.30%to72.16%onDB1.Moreover,symbolicexecutionverificationincurssome timeoverhead,theoveralldetectiontimeremainsreasonable,ensuringefficientdetectionprocesses. 
5 DISCUSSION 5.1 CapabilityAnalysisofExistingTools Zhengetal.[45]conductedananalysisof1,322open-sourceDAppsauditreportsfrom30security auditteams,compilingalarge-scale,fair,real-worldsmartcontractvulnerabilitydatasetknownas DB1.Theirexperimentsrevealedthatexistingtoolsperformedpoorlyintermsofeffectivenessand successfuldetectionrates,suggestingthatfuturedevelopmentshouldprioritizereal-worldcomplex contractsoversimpletoycontracts.Toassessthecapabilityofexistingtoolstodetectcomplex contracts,wequantifiedthecomplexityofthesecontractsandconductedcomparativeanalysis. Table5. Detectioncapabilityofexistingtoolsforcomplexcontracts AvgCyclomatic MaxCyclomatic SumCyclomatic MaxNesting CountContractCoupled Detected Avg. Detected Avg. Detected Avg. Detected Avg. Detected Avg. Mean 1.3(36.1%) 3.6 4.0(18.7%) 21.4 39.5(15.6%) 253.5 1.8(75.0%) 2.4 0.8(34.8%) 2.3 Max 2.0(25.3%) 7.9 11.0(14.9%) 74.0 104.0(11.0%) 949.0 4.0(33.3%) 12.0 3.0(25.0%) 12.0 Median 1.2(36.4%) 3.3 2.0(12.1%) 16.5 25.0(14.6%) 171.0 1.0(50.0%) 2.0 0.0(0.0%) 2.0 Std. 0.3(20.0%) 1.5 3.2(19.8%) 16.2 34.2(14.5%) 236.1 1.5(88.2%) 1.7 1.3(48.1%) 2.7 *Detectedmeanssuccessfullydetectedbyexistingtool,Avg.meanstheoverallaveragecomplexity. WeutilizedtheComplexityMetricsproposedbyChaoetal.[29]toquantifythecomplexityof
contractsandassessthedetectioncapabilitiesofexistingtoolsoncomplexcontracts.Thesemetrics provideinsightsintothecomplexityandinterdependenciesoffunctionsandcontracts.Specifically, AvgCyclomatic calculatestheaveragecyclomaticcomplexityamongfunctions,MaxCyclomatic identifiesthemostcomplexfunction,andSumCyclomaticaggregatescomplexitiesacrossfunctions. MaxNestingrevealsthedeepestnestingofcontrolstructuresinfunctions,whileCountContract- Coupled countsinterconnectedcontracts,indicatingtheirdependencies.Wecomparedtheaverage Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.8:18 ZexuWang,JiachiChen,YanlinWang,YuZhang,WeizheZhang,andZibinZheng complexitymetricsofcontractssuccessfullydetectedbyexistingtoolswiththeoverallaverage complexitymetricsofDB1,asshowninTable5.Thedatainthetablerevealsthatonlyaround30% oftheoverallcontractcomplexitycanbesuccessfullydetectedbythesetools.Thishighlightsthe limitedcapabilitiesofexistingtoolsinidentifyingvulnerabilitiesinreal-worldcomplexcontracts. TheyareinsufficientforaddressingthesecurityneedsofpracticalcontractsinDApps,indicatinga significantchallengeindetectingReentrancyvulnerabilitiesincomplexcontracts. 5.2 ThreatstoValidity External Validity. We validated SliSE’s Reentrancy vulnerability detection performance on Ethereum using the DB2 and DB3 datasets. DB2 collected from positive labels in existing tool detectionresultsonEthereumcontracts,withoutconsideringnegativelabels.However,labeling allEthereumcontractsistime-consuminganderror-prone,whichdemandsasubstantialteamof experiencedengineersforReentrancyvulnerabilityanalysis.DB2wascreatedbysubjecting230,548 verifiedsmartcontractsfromEtherscantoscansbyfiveautomateddetectiontools[1,8,9,25,37], followed by two rounds of manual examination, Zheng et al. underscored the error-prone na- ture of vulnerability labeling [46]. In addition, DB3 is the well-known SmartBugs dataset [31]. 
Thesedatasetsarewidelyacknowledgedandhavebeenopenlysharedattopsoftwareengineering conferences,establishingthemasreliablesourcesofgroundtruthforEthereumcontracts. InternalValidity.Whilevariouspermissioncontrolsinsmartcontractsareessentialforsecurity, theypresentachallengethatdemandsspecificsemanticcomprehensiontoassesspathfeasibility. SliSE does not account for these specific semantic requirements, resulting in false positives in detection results. To expand the scope of vulnerability detection, we draw inspiration from a rangeofpatternsinpathprotectivetechniques (PPTs)[40].Thesepatternshavebeenidentified throughempiricalresearchandhelpidentifycommonfalsepositivesinstate-of-the-arttoolrules. Byapplyingthesepatterns,weeffectivelyreducefalsepositives,therebyimprovingscalabilityand effectiveness. 6 RELATEDWORK 6.1 ReentrancyVulnerabilityDetection TherearenumeroustoolsthatsupportReentrancyvulnerabilitydetection,utilizingvariousvul- nerabilitydetectiontechniques,includingstaticanalysis,symbolicexecution,fuzztesting,and formalverification.ToolslikeOyente[25],Osiris[36]utilizeCFG-basedbytecodeanalysiswith patternmatchingforfastandscalableReentrancyvulnerabilitydetection.Slither[12]generates intermediatelanguageSlithIRbyanalyzingsmartcontractAST,employingrule-baseddetection forvulnerabilities,andscalingupto5typesofReentrancyvulnerabilities.Clairvoyance[40]in- troducestaintanalysisviashadowstackstoreducefalsepositiveswhilecombininglightweight symbolicexecutionforvulnerabilitydetection.Mythril[9],Manticore[27]useemulationtesting togeneratesequencesoftransactionstriggeringvulnerabilities.MPro[42]simulatesexecution byincreasingthedepthofsymbolicexecutionanalysis,exploringdeeperlevelsofthestatespace. Smartian[8]appliesdataflowanalysistoguidepathexploration,generatingcriticaltransaction sequences triggering vulnerabilities. 
Sailfish [1] analyzes program dependency information to generate a Storage Dependency Graph (SDG) and detect dangerous access patterns, but cannot analyze global cross-contract data flow. Pluto [26] supports cross-contract vulnerability detection by constructing an Inter-Contract Control Flow Graph (I-CFG) to enable cross-contract bytecode traversal, then exploring the I-CFG to detect vulnerabilities. Park [43] proposed a method based on parallel symbolic execution, which uses a dynamic forking algorithm based on process forking to speed up vulnerability detection. Proc. ACM Softw. Eng., Vol. 1, No. FSE, Article 8. Publication date: July 2024. Efficiently Detecting Reentrancy Vulnerabilities in Complex Smart Contracts 8:19 Differing from existing methods, SliSE constructs the Inter-contract Program Dependency Graph (I-PDG) to provide global control and data dependencies within contracts, including cross-contract data flows. In contrast, Sailfish [1] exclusively conducts program dependency analysis on state variables within a single contract and does not support cross-contract analysis. EtherSolve [10] calculates dynamic jump addresses using the symbolic stack, which always fails in complex contracts due to issues with excessive recursion depth. SliSE leverages constant propagation analysis with
SSA to quickly compute dynamic jump addresses and recover the CFG paths. Compared to Pluto [26], SliSE primarily slices out code blocks related to Reentrancy vulnerabilities from complex contracts for focused analysis, employing symbolic execution for efficient and reliable verification. Evaluation results show that SliSE outperforms many state-of-the-art tools in detecting Reentrancy vulnerabilities in complex contracts. 7 CONCLUSION In this paper, we introduce a tool named SliSE, designed for efficient detection of Reentrancy vulnerabilities in complex contracts. The detection process consists of two stages: Warning Search and Symbolic Execution Verification. In the Warning Search stage, SliSE analyzes the Inter-contract Program Dependency Graph (I-PDG) with program slicing, collecting suspicious vulnerability information as warnings. In the Symbolic Execution Verification stage, it employs symbolic execution to traverse the paths indicated by the warning information and validate their reachability, ensuring effective vulnerability detection. In comparative experiments, SliSE achieves impressive results, with an F1 score of 78.65%, surpassing the highest score of 9.26% achieved by eight existing state-of-the-art tools. Additionally, it achieves a recall rate exceeding 90% for Reentrancy vulnerability detection of contracts on Ethereum. Overall, SliSE provides an effective solution for detecting reentrancy vulnerabilities in complex contracts. ACKNOWLEDGMENTS The work described in this paper is supported by the National Natural Science Foundation of China (62032025, 62302534, 62332004), and the Major Key Project of Peng Cheng Laboratory under Grant PCL2023A05-2. REFERENCES [1] Priyanka Bose, Dipanjan Das, Yanju Chen, Yu Feng, Christopher Kruegel, and Giovanni Vigna. 2022. SAILFISH: Vetting Smart Contract State-Inconsistency Bugs in Seconds. In 2022 IEEE Symposium on Security and Privacy (SP). 161–178. https://doi.org/10.1109/SP46214.2022.9833721 [2] Gerardo Canfora, Aniello Cimitile, and Andrea De Lucia. 1998. Conditioned program slicing. Information and Software Technology 40, 11-12 (1998), 595–607.
[3] EthanCecchetti,SiqiuYao,HaobinNi,andAndrewC.Myers.2021.CompositionalSecurityforReentrantApplications. In2021IEEESymposiumonSecurityandPrivacy(SP).1249–1267. https://doi.org/10.1109/SP40001.2021.00084 [4] StefanosChaliasos,MarcosAntoniosCharalambous,LiyiZhou,RafailaGalanopoulou,ArthurGervais,Dimitris Mitropoulos,andBenLivshits.2023.Smartcontractanddefisecurity:Insightsfromtoolevaluationsandpractitioner surveys.arXivpreprintarXiv:2304.02981(2023). [5] JiachiChen,MingyuanHuang,ZeweiLin,PeilinZheng,andZibinZheng.2023.ToHealthierEthereum:ACompre- hensiveandIterativeSmartContractWeaknessEnumeration. arXiv:cs.SE/2308.10227 [6] JiachiChen,XinXia,DavidLo,JohnGrundy,XiapuLuo,andTingChen.2022.DefiningSmartContractDefectson Ethereum.IEEETransactionsonSoftwareEngineering48,1(2022),327–345. https://doi.org/10.1109/TSE.2020.2989002 [7] TingChen,YufeiZhang,ZihaoLi,XiapuLuo,TingWang,RongCao,XiuzhuoXiao,andXiaosongZhang.2019. TokenScope:AutomaticallyDetectingInconsistentBehaviorsofCryptocurrencyTokensinEthereum.InProceedings ofthe2019ACMSIGSACConferenceonComputerandCommunicationsSecurity(CCS’19).AssociationforComputing Machinery,NewYork,NY,USA,1503–1520. https://doi.org/10.1145/3319535.3345664 [8] JaeseungChoi,DoyeonKim,SoominKim,GustavoGrieco,AlexGroce,andSangKilCha.2021.SMARTIAN:Enhancing SmartContractFuzzingwithStaticandDynamicData-FlowAnalyses.In202136thIEEE/ACMInternationalConference onAutomatedSoftwareEngineering(ASE).227–239. https://doi.org/10.1109/ASE51524.2021.9678888 Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.8:20 ZexuWang,JiachiChen,YanlinWang,YuZhang,WeizheZhang,andZibinZheng [9] ConsenSys.2020.Mythril. https://github.com/ConsenSys/mythril [10] FilippoContro,MarcoCrosara,MarianoCeccato,andMilaDallaPreda.2021. Ethersolve:Computinganaccurate control-flowgraphfromethereumbytecode.In2021IEEE/ACM29thInternationalConferenceonProgramComprehension (ICPC).IEEE,127–137. [11] PhilDaian.2016.AnalysisoftheDAOexploit. 
https://hackingdistributed.com/2016/06/18/analysis-of-the-dao-exploit/ [12] JosselinFeist,GustavoGrieco,andAlexGroce.2019.Slither:astaticanalysisframeworkforsmartcontracts.In2019 IEEE/ACM2ndInternationalWorkshoponEmergingTrendsinSoftwareEngineeringforBlockchain(WETSEB).IEEE, 8–15. [13] AsemGhaleb,JuliaRubin,andKarthikPattabiraman.2022.eTainter:detectinggas-relatedvulnerabilitiesinsmart contracts.InProceedingsofthe31stACMSIGSOFTInternationalSymposiumonSoftwareTestingandAnalysis.728–739. [14] SeungwonGo.2018.SmartContract:SecurityPatterns. https://medium.com/returnvalues/smart-contract-security- patterns-79e03b5a1659 [15] DaojunHan,QiuyueLi,LeiZhang,TaoXu,etal.2023.ASmartContractVulnerabilityDetectionModelBasedon
SyntacticandSemanticFusionLearning.WirelessCommunicationsandMobileComputing2023(2023). [16] MaryJeanHarrold,BrianMalloy,andGreggRothermel.1993.Efficientconstructionofprogramdependencegraphs. ACMSIGSOFTSoftwareEngineeringNotes18,3(1993),160–170. [17] JingxuanHe,MislavBalunović,NodarAmbroladze,PetarTsankov,andMartinVechev.2019.LearningtoFuzzfrom SymbolicExecutionwithApplicationtoSmartContracts.InProceedingsofthe2019ACMSIGSACConferenceon ComputerandCommunicationsSecurity(CCS’19).AssociationforComputingMachinery,NewYork,NY,USA,531–548. https://doi.org/10.1145/3319535.3363230 [18] SebastianHoller,SebastianBiewer,andClaraSchneidewind.2023. HoRStify:SoundSecurityAnalysisofSmart Contracts.arXivpreprintarXiv:2301.13769(2023). [19] insurgent.2022.SoliditySmartContractSecurity:4WaystoPreventReentrancyAttacks. https://betterprogramming. pub/solidity-smart-contract-security-preventing-reentrancy-attacks-fc729339a3ff [20] ThomasShababiJacquesDafflon,JordiBaylina.2017.ERC-777:TokenStandard. https://eips.ethereum.org/EIPS/eip-777 [21] BoJiang,YeLiu,andW.K.Chan.2018. ContractFuzzer:FuzzingSmartContractsforVulnerabilityDetection.In Proceedingsofthe33rdACM/IEEEInternationalConferenceonAutomatedSoftwareEngineering(ASE’18).Association forComputingMachinery,NewYork,NY,USA,259–269. https://doi.org/10.1145/3238147.3238177 [22] SukritKalra,SeepGoel,MohanDhawan,andSubodhSharma.2018. Zeus:analyzingsafetyofsmartcontracts..In Ndss.1–12. [23] JohannesKruppandChristianRossow.2018.teEther:GnawingatEthereumtoAutomaticallyExploitSmartContracts. In27thUSENIXSecuritySymposium(USENIXSecurity18).1317–1333. [24] ZeqinLiao,ZibinZheng,XiaoChen,andYuhongNan.2022.SmartDagger:ABytecode-BasedStaticAnalysisApproach forDetectingCross-ContractVulnerability.InProceedingsofthe31stACMSIGSOFTInternationalSymposiumon SoftwareTestingandAnalysis(ISSTA2022).AssociationforComputingMachinery,NewYork,NY,USA,752–764. 
https://doi.org/10.1145/3533767.3534222 [25] LoiLuu,Duc-HiepChu,HrishiOlickel,PrateekSaxena,andAquinasHobor.2016.MakingSmartContractsSmarter. InProceedingsofthe2016ACMSIGSACConferenceonComputerandCommunicationsSecurity(CCS’16).Association forComputingMachinery,NewYork,NY,USA,254–269. https://doi.org/10.1145/2976749.2978309 [26] FuchenMa,ZhenyangXu,MengRen,ZijingYin,YuanliangChen,LeiQiao,BinGu,HuizhongLi,YuJiang,andJiaguang Sun.2021.Pluto:Exposingvulnerabilitiesininter-contractscenarios.IEEETransactionsonSoftwareEngineering48,11 (2021),4380–4396. [27] MarkMossberg,FelipeManzano,EricHennenfent,AlexGroce,GustavoGrieco,JosselinFeist,TrentBrunson,and ArtemDinaburg.2019.Manticore:Auser-friendlysymbolicexecutionframeworkforbinariesandsmartcontracts.In 201934thIEEE/ACMInternationalConferenceonAutomatedSoftwareEngineering(ASE).IEEE,1186–1189. [28] TaiD.Nguyen,LongH.Pham,JunSun,YunLin,andQuangTranMinh.2020.SFuzz:AnEfficientAdaptiveFuzzerfor SoliditySmartContracts.InProceedingsoftheACM/IEEE42ndInternationalConferenceonSoftwareEngineering(ICSE ’20).AssociationforComputingMachinery,NewYork,NY,USA,778–788. https://doi.org/10.1145/3377811.3380334 [29] ChaoNi,CongTian,KaiwenYang,DavidLo,JiachiChen,andXiaohuYang.2023.AutomaticIdentificationofCrash- inducingSmartContracts.In2023IEEEInternationalConferenceonSoftwareAnalysis,EvolutionandReengineering (SANER).108–119. https://doi.org/10.1109/SANER56733.2023.00020 [30] MichaelRodler,DavidPaaßen,WentingLi,LukasBernhard,ThorstenHolz,GhassanKarame,andLucasDavi.2023. EF/CF:HighPerformanceSmartContractFuzzingforExploitGeneration.arXivpreprintarXiv:2304.06341(2023). [31] smartbugs.2020.Smartbugswilddataset. https://github.com/smartbugs/smartbugs-wild [32] SunbeomSo,SeongjoonHong,andHakjooOh.2021.SmarTest:EffectivelyHuntingVulnerableTransactionSequences inSmartContractsthroughLanguageModel-GuidedSymbolicExecution.In30thUSENIXSecuritySymposium(USENIX Security21).1361–1378. 
Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.EfficientlyDetectingReentrancyVulnerabilitiesinComplexSmartContracts 8:21 [33] JianzhongSu,Hong-NingDai,LingjunZhao,ZibinZheng,andXiapuLuo.2023.EffectivelyGeneratingVulnerable TransactionSequencesinSmartContractswithReinforcementLearning-GuidedFuzzing.In37thIEEE/ACMInterna- tionalConferenceonAutomatedSoftwareEngineering(ASE22).AssociationforComputingMachinery,NewYork,NY, USA,Article36,12pages. https://doi.org/10.1145/3551349.3560429 [34] PhuwanaiThummavet.2022. SoliditySecurityByExample04:Cross-FunctionReentrancy. https://medium.com/ valixconsulting/solidity-smart-contract-security-by-example-04-cross-function-reentrancy-de9cbce0558e
[35] PhuwanaiThummavet.2022. SoliditySecurityByExample05:Cross-ContractReentrancy. https://medium.com/ valixconsulting/solidity-smart-contract-security-by-example-05-cross-contract-reentrancy-30f29e2a01b9 [36] ChristofFerreiraTorres,JulianSchütte,andRaduState.2018. Osiris:Huntingforintegerbugsinethereumsmart contracts.InProceedingsofthe34thAnnualComputerSecurityApplicationsConference.664–676. [37] PetarTsankov,AndreiDan,DanaDrachsler-Cohen,ArthurGervais,FlorianBünzli,andMartinVechev.2018.Securify: PracticalSecurityAnalysisofSmartContracts.InProceedingsofthe2018ACMSIGSACConferenceonComputer andCommunicationsSecurity(CCS’18).AssociationforComputingMachinery,NewYork,NY,USA,67–82. https: //doi.org/10.1145/3243734.3243780 [38] PhilippeCastonguayWitekRadomski,AndrewCooke.2018.ERC-1155:MultiTokenStandard. https://eips.ethereum. org/EIPS/eip-1155 [39] GavinWoodetal.2014. Ethereum:Asecuredecentralisedgeneralisedtransactionledger. Ethereumprojectyellow paper151,2014(2014),1–32. [40] JiamingYe,MingliangMa,YunLin,YuleiSui,andYinxingXue.2020.Clairvoyance:Cross-contractstaticanalysisfor detectingpracticalreentrancyvulnerabilitiesinsmartcontracts.InProceedingsoftheACM/IEEE42ndInternational ConferenceonSoftwareEngineering:CompanionProceedings.274–275. [41] MengyaZhang,XiaokuanZhang,YinqianZhang,andZhiqiangLin.2020. TXSPECTOR:UncoveringAttacksin EthereumfromTransactions.InProceedingsofthe29thUSENIXConferenceonSecuritySymposium(SEC’20).USENIX Association,USA,Article156,18pages. [42] WilliamZhang,SebastianBanescu,LeonardoPasos,StevenStewart,andVijayGanesh.2019.Mpro:Combiningstatic andsymbolicanalysisforscalabletestingofsmartcontract.In2019IEEE30thInternationalSymposiumonSoftware ReliabilityEngineering(ISSRE).IEEE,456–462. [43] PeilinZheng,ZibinZheng,andXiapuLuo.2022. 
Park:AcceleratingSmartContractVulnerabilityDetectionvia Parallel-ForkSymbolicExecution.InProceedingsofthe31stACMSIGSOFTInternationalSymposiumonSoftware TestingandAnalysis(ISSTA2022).AssociationforComputingMachinery,NewYork,NY,USA,740–751. https: //doi.org/10.1145/3533767.3534395 [44] ZibinZheng,KaiwenNing,YanlinWang,JingwenZhang,DewuZheng,MingxiYe,andJiachiChen.2024.ASurveyof LargeLanguageModelsforCode:Evolution,Benchmarking,andFutureTrends. arXiv:cs.SE/2311.10372 [45] ZibinZheng,JianzhongSu,JiachiChen,DavidLo,ZhijieZhong,andMingxiYe.2023.DAppSCAN:BuildingLarge-Scale DatasetsforSmartContractWeaknessesinDAppProjects. arXiv:cs.SE/2305.08456 [46] ZibinZheng,NengZhang,JianzhongSu,ZhijieZhong,MingxiYe,andJiachiChen.2023.TurntheRudder:ABeaconof ReentrancyDetectionforSmartContractsonEthereum.InProceedingsofthe45thInternationalConferenceonSoftware Engineering(ICSE’23).IEEEPress,295–306. https://doi.org/10.1109/ICSE48619.2023.00036 [47] LiyiZhou,XihanXiong,JensErnstberger,StefanosChaliasos,ZhipengWang,YeWang,KaihuaQin,RogerWattenhofer, DawnSong,andArthurGervais.2023.Sok:Decentralizedfinance(defi)attacks.In2023IEEESymposiumonSecurity andPrivacy(SP).IEEE,2444–2461. [48] ShunfanZhou,ZheminYang,JieXiang,YinzhiCao,MinYang,andYuanZhang.2020. AnEver-EvolvingGame: EvaluationofReal-WorldAttacksandDefensesinEthereumEcosystem.InProceedingsofthe29thUSENIXConference onSecuritySymposium(SEC’20).USENIXAssociation,USA,Article157,17pages. Received2023-09-27;accepted2024-01-23 Proc.ACMSoftw.Eng.,Vol.1,No.FSE,Article8.Publicationdate:July2024.
2403.14274 Multi-role Consensus through LLMs Discussions for Vulnerability Detection Zhenyu Mao1, Jialong Li1,∗, Dongming Jin2, Munan Li3, and Kenji Tei4 1 Waseda University, Tokyo, Japan 2 Peking University, Beijing, China 3 Dalian Maritime University, Dalian, China 4 Tokyo Institute of Technology, Tokyo, Japan *Corresponding Author: lijialong@fuji.waseda.jp Abstract—Recent advancements in large language models tester receives the initial prompt detailing its role-setting, its (LLMs) have highlighted the potential for vulnerability de- task, and the code segment to analyze. The tester is asked to tection, a crucial component of software quality assurance. output its first response, a textual completion that includes a Despite this progress, most studies have been limited to the judgment, restricted to a binary indicator (1 for vulnerable, perspective of a single role, usually testers, lacking diverse 0 for non-vulnerable), and a brief reasoning. The response viewpoints from different roles in a typical software de- is constrained by a maximum token limit, ensuring that the velopment life-cycle, including both developers and testers. tester’s reasoning is both precise and substantive, facilitating To this end, this paper introduces a multi-role approach to clarity and efficiency in the subsequent dialectic interaction. employ LLMs to act as different roles simulating a real-life Then this initial judgment with reasoning is forwarded to the code review process and engaging in discussions toward a developer, together with a similar initial prompt. consensusontheexistenceandclassificationofvulnerabilities Discussion stage: The discussion stage aims to realize an in the code. Preliminary evaluation of this approach indicates iterative output exchange in an attempt to reach a collectively a 13.48% increase in the precision rate, an 18.25% increase multi-perspective consensus inside the code review team. in the recall rate, and a 16.13% increase in the F1 score. 
The tester and the developer, equipped with their unique perspectiveandjudgments,enteradialecticinteraction,aimed Keywords–large language models; vulnerability detection; at exploring and resolving different opinions on potential prompt engineering; software quality assurance vulnerabilities. During this stage, the tester and the developer 1. INTRODUCTION repeat a ”pose query - deduce response - relay insight” loop, which serves as an incremental prompt, pushing participants As a fundamental process in software engineering, vulnera- to re-evaluate and polish their judgment and reasoning. A bility detection plays a critical role in ensuring the quality maximumdepthondiscussionroundsispre-settopreventthe and security of software systems by identifying potential dialogues from devolving into an endless cycle, ensuring that security threats [1], [2]. Nowadays, with the emergence of the discussion remains both goal-oriented and time-efficient. large language models (LLMs), due to their profound code Conclusion stage: The conclusion stage summarizes the dis- comprehension,LLMshavealsobeenemployedfortheappli- cussions and outputs the final result. Once a consensus or cation in the realm of vulnerability detection [3], [4], [5]. the pre-set maximum discussion depth is reached, the tester’s However, a notable limitation in these studies is that they latest judgment is recorded as the final judgment, as the tester often only consider the dominant tester-centric perspective in usually holds primary responsibility in the review process. the prompt, such as asking LLMs to act as quality assurance engineers to detect potential vulnerabilities. This single-role approach results in an imperfect understanding of the inten- tions behind the code, an incomplete exploration of potential issues, and consequently, reduced detection efficiency. 
To avoidthis,thereal-worldcodereviewprocessusuallyinvolves thecollaborationofteammembers(e.g.,developerandtester) possessing diverse roles, responsibilities, and viewpoints. Inspired by this, this paper introduces a novel multi-role approach, namely Multi-role Consensus through LLMs Dis- Figure1. Overviewofthemulti-roleapproach cussions,foremployingLLMstoactasmultipledifferentroles in vulnerability detection, simulating a real-life code review 3. PRELIMINARYEVALUATION process. The key idea of this approach is to integrate the The preliminary evaluation is driven by the research question diverse perspectives of team members with different roles, asfollows:Inwhatwaysdoestheproposedapproachimprove thereby reaching a well-informed and collective consensus the performance of vulnerability detection? regarding the classification of the vulnerability in the code. 3.1 Experiment Settings 2. MULTI-ROLEAPPROACH Dataset:Thedatasetusedfortheexperiment,asreferencedin Initialization stage:Theinitiationstageisdesignedtoenable [6],isaC/C++datasetconsistingofbothvulnerableandnon- the tester to independently provide an initial judgment. The vulnerable code segments across four categories: library/API 4202 yaM 81 ]ES.sc[ 4v47241.3042:viXraTABLEI EXPERIMENTRESULTS Testing Vulner- Precision rate Recall rate F1 score data ability single-role multi-role single-role multi-role single-role multi-role group1 category basic CoT basic CoT basic CoT basic CoT basic CoT basic CoT Group1 FC 0.735 0.756 0.830 0.837 0.610 0.643 0.801 0.796 0.667 0.695 0.816 0.816 Group1 AE 0.750 0.756 0.837 0.854 0.618 0.634 0.779 0.818 0.677 0.689 0.807 0.835 Group1 AU 0.772 0.759 0.860 0.856 0.668 0.695 0.851 0.896 0.716 0.725 0.856 0.875
Group1 PU 0.753 0.769 0.832 0.847 0.629 0.654 0.791 0.875 0.685 0.707 0.811 0.861 Group2 FC 0.568 0.564 0.637 0.641 0.640 0.660 0.752 0.718 0.602 0.608 0.690 0.677 Group2 AE 0.575 0.553 0.627 0.647 0.628 0.640 0.732 0.750 0.600 0.593 0.675 0.694 Group2 AU 0.549 0.579 0.651 0.639 0.652 0.696 0.750 0.778 0.596 0.632 0.697 0.702 Group2 PU 0.550 0.536 0.619 0.632 0.674 0.698 0.778 0.782 0.606 0.606 0.690 0.699 Group3 FC 0.196 0.204 0.225 0.232 0.650 0.695 0.730 0.765 0.302 0.315 0.344 0.356 Group3 AE 0.197 0.198 0.229 0.229 0.660 0.680 0.755 0.765 0.304 0.306 0.352 0.352 Group3 AU 0.218 0.211 0.238 0.246 0.735 0.710 0.805 0.830 0.336 0.325 0.368 0.380 Group3 PU 0.199 0.197 0.224 0.237 0.690 0.705 0.755 0.790 0.308 0.309 0.345 0.364 1 Group1 consists 800 vulnerable and 200 non-vulnerable code segments; Group2: 500 and 500; Group3: 200 and 800. function call (FC), arithmetic expression (AE), array usage segment is non-vulnerable. Given the limitations on paper (AU), and pointer usage (PU). length, please refer to the complete discussion record at Task:LLMsaretaskedwithabinaryclassificationonwhether github.com/rockmao45/LLMVulnDetection. the provided code segment contains any vulnerability that The experiment results indicate a more obvious improvement, belongs to FC, AE, AU, or PU. especially in the recall rate and F1 score when the proportion Baseline: All LLMs used in this experiment are instances ofvulnerabledatainthetestingdatasetishigher.Thisislikely of the gpt-3.5-turbo-0125 model. The proposed multi-role because the rounds of discussions allows LLMs to explore a approach is compared to the result obtained from the single- broader spectrum of potential vulnerabilities. role LLM approach. Inspired by [3], the following two types 4. CONCLUSIONANDFUTUREWORK of prompts are used to test these two approaches. 
The basic This paper proposes a novel approach for improving vulner- prompt directly asks the LLM about the presence of any ability detection with LLMs, where LLMs act as different vulnerabilityinFC,AE,AU,orPUwithoutadditionalcontext. roles in a real-life code review team, discussing the exis- Chain-of-thought (CoT), i.e., step-by-step, is also used to tenceandclassificationofvulnerabilitiestowardsaconsensus. analyze the existence of a particular vulnerability. Preliminary evaluations have shown notable improvements in Discussionconstraints:Themaximumdiscussiondepthisset precision rates,recall rates andF1 scores. Futurework should to 5, and the maximum response length is set to 120 tokens. enhance this approach by integrating in-context learning to Metrics: To assess the enhancements in vulnerability detec- guide LLMs to better collaborate in their discussions. tion performance, this study employs three key metrics: the precision rate, the recall rate, and the F1 score [3]. REFERENCES 3.2 Results and Discussions [1] Z. Li et al., “On the effectiveness of function-level vul- Table I summarizes the experiment results, highlighting the nerability detectors for inter-procedural vulnerabilities,” improvements through the proposed approach. On average, ICSE, 2024. there is a 13.48% increase in the precision rate, an 18.25% [2] B. Steenhoek et al., “Dataflow analysis-inspired deep increase in the recall rate, and a 16.13% increase in the learning for efficient vulnerability detection,” ICSE, 2024. F1 score. In terms of computation costs, due to the need [3] C. Zhang et al., “Prompt-enhanced software vulnerability for conversation between different roles, it requires a 484% detection using chatgpt,” ICSE, 2024. increase in the number of tokens consumed. [4] Y. 
Nong et al., “Chain-of-thought prompting of large Anillustrativediscussionrecorddemonstratinghowthedevel- language models for discovering and fixing software vul- oper contributed to enhancing the tester’s detection accuracy nerabilities,” 2024. is as follows. Initially, the tester identified a code segment [5] X. Zhou et al., “Large language model for vulnerability as vulnerable due to the lack of validation or sanitization detection: Emerging results and future directions,” ICSE, during the concatenation of two buffers. However, as the 2024. developer pointed out that the wcsncat function and the [6] Z.Lietal.,“Sysevr:Aframeworkforusingdeeplearning proper size argument are used to prevent a buffer overflow, to detect software vulnerabilities,” IEEE Trans. on De- the tester finally made the correct judgment that this code pendable and Secure Computing, 2022.
2403.17218 A Comprehensive Study of the Capabilities of Large Language Models for Vulnerability Detection Benjamin Steenhoek Md Mahbubur Rahman Monoshi Kumar Roy Iowa State University Iowa State University Iowa State University Ames, Iowa, USA Ames, Iowa, USA Ames, Iowa, USA benjis@iastate.edu mdrahman@iastate.edu monoshi@iastate.edu Mirza Sanjida Alam Earl T. Barr Wei Le Iowa State University University College London Iowa State University Ames, Iowa, USA London, UK Ames, Iowa, USA sanjida@iastate.edu e.barr@ucl.ac.uk weile@iastate.edu Abstract—Large Language Models (LLMs) have demonstrated reasoning. Pattern-matching on code structures is insufficient greatpotentialforcodegenerationandothersoftwareengineering to produce precise analyses [67, 53], especially for real-world tasks.Vulnerabilitydetectionisofcrucialimportancetomaintain- code. For example, to precisely detect a buffer overflow, we ingthesecurity,integrity,andtrustworthinessofsoftwaresystems. cannotonlyscanforstrcpyormallocstatements.Weneed Precise vulnerability detection requires reasoning about the code, making it a good case study for exploring the limits of LLMs’ to identify the statements that update the strings and buffers, reasoning capabilities. Although recent work has applied LLMs reason about the lengths of the strings after the changes at to vulnerability detection using generic prompting techniques, these statements, and also understand the bounds-check code their full capabilities for this task and the types of errors they to judge whether the protection is sufficient. make when explaining identified vulnerabilities remain unclear. LLMshaveshownlimitedabilityforcomplexreasoning[27], In this paper, we surveyed eleven LLMs that are state-of-the- andarecentsurveycallsformorechallengingtaskstoevaluate art in code generation and commonly used as coding assistants, and evaluated their capabilities for vulnerability detection. 
their reasoning abilities [28]; this calls into question whether We systematically searched for the best-performing prompts, LLMspossessthecapabilitiestoperformeffectivevulnerability incorporating techniques such as in-context learning and chain- detection. This capability has profound implications for the of-thought, and proposed three of our own prompting methods. potential applications of LLMs in other tasks which require Ourresultsshowthatwhileourpromptingmethodsimprovedthe understandingandreasoningaboutsemanticsofcode,e.g.,fault models’performance,LLMsgenerallystruggledwithvulnerability detection. They reported 0.5-0.63 Balanced Accuracy and failed localization, program repairs, debugging and code generation to distinguish between buggy and fixed versions of programs for non-trival tasks. Studying LLMs for vulnerability detection in 76% of cases on average. By comprehensively analyzing and alsoshedlightsfortheindustrypractitionersonwhetherLLMs categorizing287instancesofmodelreasoning,wefoundthat57% can be considered as their software assurance tools. of LLM responses contained errors, and the models frequently Several studies have shown that LLMs struggle to attain predicted incorrect locations of buggy code and misidentified bug types. LLMs only correctly localized 6 out of 27 bugs high performance on vulnerability detection [23, 63, 46, 71]. in DbgBench, and these 6 bugs were predicted correctly by LLMs generally underperformed with basic prompts [23], 70-100% of human participants. These findings suggest that which motivated the use of in-context examples [63], chain-of- despitetheirpotentialforothertasks,LLMsmayfailtoproperly thoughts [46] and static analysis [71]. Prior studies primarily comprehendcriticalcodestructuresandsecurity-relatedconcepts. evaluate vulnerability detection performance. The very recent Our study highlights the need for further research to improve the vulnerability detection capabilities of LLMs. 
Our data and work (to the best of our knowledge, these papers are still code are available at https://figshare.com/s/78fe02e56e09ec49300b. in arXiv but not yet published) highlight broad errors in the models’ vulnerability detection responses [69, 46, 58, 63], but do not comprehensively investigate the LLM errors from the I. INTRODUCTION perspective of detecting vulnerabilities. Large Language Models (LLMs) have demonstrated im- Our studies, which are concurrently developed with these pressive capabilities across various software engineering tasks, works, aim for (1) comprehensiveness — we surveyed a such as code generation [12], test generation [35], and code wider variety of models and prompts in different scenarios summarization [3]. Their versatile capabilities, along with of vulnerability detection, and analyzed more LLM responses their novel ability to explain their predictions in plain-text, withmultipleraters,comparedtopriorwork;and(2)usefulness make LLMs worthwhile subjects of investigation for detecting —beyondevaluating“Yes/No”predictions,westudiedwhether security vulnerabilities. In static analysis and human inspec- LLMs can correctly report the types, locations and causes of tion, vulnerability detection requires code understanding and a vulnerability, as done in industry-standard static analysis- 1 4202 raM 52 ]ES.sc[ 1v81271.3042:viXrabased detectors [18, 25, 56], and point out capabilities and • Weanalyzed287LLMresponsestobreakdowntheerrors codestructureswhichLLMsstruggledwith,highlightingfuture which LLMs made, and provide insights and a dataset for research directions. In that regard, we also compared the capa- future research on vulnerability detection using LLMs.
bilities of LLMs with human performance for understanding • We evaluated LLMs’ debugging abilities in comparison vulnerabilities. Specifically, we designed the following four with humans on the DbgBench vulnerability benchmark. questions to guide our research: II. STUDYSETUP • RQ1: What are the most successful and least successful A. Dataset prompt designs for LLM-based vulnerability detection? • RQ2: How well do state-of-the-art LLMs perform on Weevaluatedthemodels’vulnerabilitydetectioncapabilities vulnerability detection? mainly using the SVEN dataset released by He and Vechev • RQ3: What kinds of errors do LLMs make when explain- [26]. This dataset was collected from CVE records reporting ing the vulnerabilities? real-world vulnerabilities from the CrossVul [45], BigVul [19], • RQ4: How do LLMs compare to human developers for and VUDENC [64] datasets. localizing vulnerabilities? SVEN is made of the source code of functions that were changed in vulnerability-fixing commits, where the version To thoroughly investigate these RQs, we evaluated 11 of the code before the commit is considered vulnerable and SOTA LLMs using five distinct prompt templates, including the version after the commit is considered non-vulnerable. developing three novel techniques for generating prompts. The authors curated the dataset by manually inspecting the We systematically searched among the prompts to draw out commitsbasedontheirdomainexpertisetoensurethatthedata the best performance from each model (Section III-A). We includes true security vulnerabilities/fixes and excludes data investigated their empirical performance and their capability to quality issues, including spurious commit message matches, distinguish buggy code from the corresponding fixed versions tangled commits, and project-specific APIs. Leveraging these (Section III-B). 
We rigorously analyzed 287 LLM responses labels, we focus on the binary classification setting: given a explaining their vulnerability predictions and categorized them prompt containing a function’s source code, models predict into 4 comprehensive error categories specifically tailored whether the function belongs in the positive (vulnerable) or for vulnerability detection, revealing the ways in which the negative (non-vulnerable) class. LLMs frequently failed and which code structures the models The entire SVEN dataset comprises 846 C/C++ functions. struggled with, as well as the effects of such errors on the We used 100 functions (50 pairs of a vulnerable function and models’ explanation quality (Section III-C). We evaluated the its corresponding fixed version) to perform the study. This models’ fault localization abilities on DbgBench, comparing is because (1) many real-world functions are very long and their performance and difficulties with those of professional do not fit the context window of the LLMs under study; (2) human software engineers to understand their similarities and running LLM queries is very costly. It takes an average of differences (Section III-D). 2 GPU-minutes per query, totaling 3.4 GPU-hours for 100 Our results show the following: most models performed functions for each prompt method per model, and about $180 best with Basic and In-Context prompting. Our proposed CoT- of OpenAI credits in total. We studied a total of 11 models SA and Contrastive Pairs, used with In-Context prompting, and for each model, we used 5 prompt methods (§ III-A1). In were beneficial for some models. LLMs performed within 0.5- addition, the models’ responses tended to vary when sampled 0.63 Balanced Accuracy, close to a random-guess baseline, multiple times. To address this variance, we ran the model and failed to distinguish buggy and fixed versions of code in inference 3 times with different random seeds and report the 76% of cases on average. 
57% of LLM responses contained aggregated results. errors in Code Understanding, Hallucination, Logic, and/or We also used the Big-Vul [19] and D2A [73] datasets to Commonsense Knowledge, and among others, LLMs especially source in-context examples and extra metadata to generate our struggled with correctly identifying bounds/null checks. Fur- prompts (see Section III-A). We used DbgBench [7] to compare thermore, LLMs frequently predicted erroneous location, type, the performances of LLMs and human for vulnerability or root cause of vulnerabilities, in spite of the correct binary detection (see Section III-D). label. LLMs correctly located 6/27 bugs in DbgBench [7], all of which were correctly diagnosed by at least one human. B. Models GPT-3.5 performed the best among all the models and correctly We collected a list of large language models from two located 4/27 bugs. benchmark surveys [72, 41] as well as the highest-performing In summary, we make the following contributions: models from PapersWithCode [49]. We added several notable • We designed 3 new prompt templates for vulnerability open-source models (e.g. StarCoder [38]) and excluded the detection, integrating information from bug-fixing patches, models for which inference was not available via API (e.g. CVE descriptions, and static analyzers. Bard [1]), not trained on source code (e.g. Pythia [6]), were • We comprehensively evaluated the vulnerability detection trained only on distillations from ChatGPT (e.g. Vicuna [13]), capabilities of state-of-the-art LLMs, including testing or were superseded by later iterations on the same project (e.g. them on pairs of buggy/fixed code. LlaMA 1 [60]). In Table I, we list 11 models used in our study, 2TABLE I. 11 models we studied, HumanEval Pass@1 is reported by PapersWithCode [49] or the source papers. Model Parameters ContextLength HumanEval $\mathrm{Balanced\ Accuracy}=\frac{1}{2}\left(\frac{\mathrm{correct_{negative}}}{\mathrm{examples_{negative}}}+\frac{\mathrm{correct_{positive}}}{\mathrm{examples_{positive}}}\right)$
2 GPT-4[47] - 128k 86.6 We observed that LLMs often produce predictions that Gemini1.0Pro[59] - 32k 67.7 WizardCoder[44] 15B 2k 57.3 are highly imbalanced, such as labeling every example as CodeLLAMA[52] 34B 16k∼100k 48.8 “vulnerable” (see Section III-B), and that conventional metrics GPT-3.5[2] - 4k 48.1 such as F1 and MCC tend to favor these severely-imbalanced Mixtral-MoE[32] 45B 8k∼128k 40.2 Mistral[31] 7B 8k∼128k 30.5 classifiers (see our data package for details [4]). We argue StarCoder[38] 15B 8k 30.4 that an imbalanced classifier that pathologically predicts only LLAMA2[61] 70B 4k 29.9 one class is less desirable than one that demonstrates some StarChat-β [62] 15B 8k - MagiCoder[66] 7B 16k∼100k - ability to differentiate between classes. In order to reflect this, we primarily used Balanced Accuracy [8] to compare model TABLEII.Text generation parameters we used. performance. Parameter HuggingFace OpenAI Google III. STUDYDESIGNSANDRESULTSOFRQS Top-p 0.9 1.0 1.0 A. RQ1: What are the most successful and least successful Temperature 0.1 0.1 0.1 Max.tokensgenerated 512 512 512 prompt designs for LLM-based vulnerability detection? 1) Prompting methods: We used three existing prompting methods [40], including Basic (zero-shot) prompting, In- including the number of parameters (if it is an open source context (n-shot) prompting, and In-context prompting based on model), the length of the context window, i.e., the maximum embedding similarity. We also designed three new prompting number of input tokens, and HumanEval Pass@1 score [12], methods, namely In-context prompting based on contrastive a metric that can somewhat reflect the models’ capabilities in pair, Chain-of-Thought from CVE descriptions, and Chain-of- generating source code. Thought from static analysis. C. 
Implementation Basic (zero-shot) prompting: We first designed a system We wrote scripts to automatically query the models and prompt to set the context: “I want you to act as a vulnerability parse the model output. For the open-source models, we used detection system”, and we explored a variety of natural huggingface[29]localtextgenerationAPIsandwroteacustom language descriptions: (1) Basic query: “Is the following prompting framework. For GPT-3.5, GPT-4, and Gemini, we function buggy? Please answer Yes or No.” (We also tried used the APIs hosted by the corresponding organizations. “Is the following function vulnerable?”; however, our pilot Table II shows the text generation parameters we used for study shows that it did not perform as well.) (2) CWE list: the models. We selected values of the models’ hyperparam- This prompt starts with “Does the following function contain eters intended to make their responses more deterministic, one of the following bug types?”, followed by a fixed list of without sampling greedily (which can substantially reduce the bug types, e.g., “CWE-190: Integer Overflow”; (3) Q/A: Begin performance). the query with “Question:” and begin the model’s response The models generally replied with plain-text sentences. with “Answer:”. This conditions the model to respond in a We developed a regular expression-based method to extract question-answering mode. categorical yes/no responses; in cases where the models had In-context(n-shot)prompting: Here,weprovideexamplesof no categorical response, we considered the prediction incorrect. inputsandresponsesforin-contextlearning[9].Thein-context In order to ensure the correctness of our extraction method, examples condition the model to reply in the same format as we selected 10% of the responses for manual observation and the example responses. The selection of in-context examples foundthatourextractionprocedurehadanerrorrateoflessthan can impact the performance. 
We studied three settings: (1) 0.5%.Forfuturereuse,weopen-sourceourcodeanddocument randomly selected examples, (2) the examples that had similar further implementation details in our data package [4]. embeddings to the query example, and (3) the examples from contrastive pairs (see below for details). D. Metrics In-context prompting based on embedding similarity: We evaluated the models using the following four metrics: Choose the in-context examples which are most similar to Accuracy,theproportionofcorrectpredictionsonallexamples, the code in the query, where “similar” is approximated by the Precision, the proportion of correct predictions out of the vector proximity between code embeddings. Examples which examples on which the model predicted positive (vulnerable), arecloserinembeddingspacecanbemorerelevanttothequery Recall,theproportionofcorrectpredictionsoutoftheexamples code[36].Wesurveyedseveralembeddingmodels[51,48,21] whicharelabeledpositive(vulnerable),andBalancedAccuracy, and chose CodeBERT [21] because it performed best. We the mean of per-class accuracy for both positive and negative searched the SVEN dataset (the 100 examples we used) for classes. similar examples. 3In-context prompting based on contrastive pairs: We used buggypathsfromtheInferstaticanalyzer[18]forseveralopen- contrasting pairs by providing the vulnerable version of the source C++ projects. We convert the buggy paths to natural code (before the bug-fixing commit) and the fixed version language descriptions and use them as the response. This is (after the commit) as in-context examples in the same prompt. an example COT response for a buffer overflow vulnerability: Since these two versions of the source code differ primarily in 1. A buffer buf of size 10 is allocated at
the portion related to the bug-fix, we hoped that this prompt line 1. 2. An index i is initialized to a value in the template would highlight the cause of the bug and instruct the range [0, 100] at line 2. model to learn that the small differences in code can lead to 3. The index i is used to access buf at line 3. different labels. This may exceed the bounds of buf. In-context prompting based on CoT from CVE descrip- We append “Therefore, the example is buggy” to complete the tions: We designed “chain-of-thought” prompts by providing response. For non-vulnerable examples, we provide the default intermediatereasoningstepswhichleadtotheanswer,inspired response. by Wei et al. [65]. We use in-context examples from the Big- Considering the numerous ways the above prompting tech- Vul dataset [19], which includes the CVE bug reports. For niques can be combined, we systematically searched for high- vulnerable examples, we used the default in-context query performing prompts for each model. For the basic prompting and provide the chain-of-thought response. To produce such method, we evaluated four “modes” — (1) basic query, (2) response, we adapt the descriptions in these bug reports to Q/A, (3) CWE list, and (4) both Q/A and CWE list — and describe how the bug manifests. For example, CVE-2017- designated the best-performing mode as the Basic prompting 9211 [15] describes the vulnerability, including the symptoms, approach. 
attack surface, and variable involved: For the in-context prompting methods, we evaluated random The crypto_skcipher_init_tfm function in selection (IC-Random), embedding similarity search (IC- crypto/skcipher.c in the Linux kernel through Embedding), chain-of-thought from CVE descriptions (CoT- 4.11.2 relies on a setkey function that lacks CVE),andchain-of-thoughtfromstaticanalysis(CoT-SA).We a key-size check, which allows local users to didapilotstudy(§V)andfoundthat6shotsperformedthebest cause a denial of service (NULL pointer for most models and was the maximum number of examples dereference) via a crafted application. that fit within most models’ context sizes. Thus, we used 6 We use this description as the CoT response and append shots for all in-context prompts. For each approach, we used “Therefore, the example is buggy” to complete the response. two configurations: (1) 6 randomly-selected examples as well Fornon-vulnerableexamples,weprovidethedefaultin-context as (2) 3 contrastive pairs. We then selected the best outcome example query/response. for each model and prompting method. We list examples of In-context prompting based on CoT from static analysis: the prompts and more details in our data package [4]. Wealsousedtheoutputbuggypathsreportedbystaticanalysis 2) RQ1 Results: Figure 1 shows the performance of all tools to prepare the chains of thought prompt. The buggy path prompting methods over 3 random seeds, compared to a consists of a list of statements that can lead to the bug. We use hypothetical random-guess baseline. 
The x-axis shows the in-context examples from the D2A dataset [73], which lists models, and the y-axis reported the performance using the Prompting method Basic IC-Random IC-Embedding CoT-CVE CoT-SA Random-guess baseline (Best out of the contrastive and non-contrastive versions) 0.65 0.60 0.55 0.50 0.45 0.40 StarChat-β GPT-4 StarCoder Mixtral-MoE MagiCoder GPT-3.5 LLA M A 2 WizardCode or deLLA M A Mistral Gemini C ycaruccAdecnalaB Performance by prompt Best-performing prompt 4 4 1 2 FIG.1.A comparison of different prompting methods for each model on 50 buggy and fixed pairs (100 examples). Bar height shows the mean of three random seeds, with error bars indicating one standard deviation. 4Prompting method Basic IC-Random IC-Embedding CoT-CVE CoT-SA Random-guess baseline (Best out of the contrastive and non-contrastive versions) 0.65 0.60 0.55 0.50 0.45 0.40 GPT-4 LLA M A o2 deLLA M A WizardCoder GPT-3.5 MagiCoder Mistral Mixtral-MoE StarChat-β StarCoder Gemini C ycaruccAdecnalaB Performance by prompt Best-performing prompt 5 1 3 2 FIG. 2.A comparison of different prompting methods for each model on 50 unpaired examples (unrelated in terms of bug fixes). Bar height shows the mean of three random seeds, with error bars indicating one standard deviation. metric of Balanced Accuracy. Our results show that the Basic of 25 vulnerable and 25 non-vulnerable functions from our and IC-Random prompting methods achieve the best. As dataset that were unrelated in terms of bug fixes. Figure 2 summarized on the pie chart on the right in Figure 1, the two shows that Basic, IC-Random and IC-Embedding were still methods(theblueandtheorangecoloredparts)eachrankedthe the best prompting methods for the models, improving 5, 3, best for 4 out of 11 models. We designed different approaches and 2 models respectively. 
On these unpaired data, Contrastive to select examples used for in-context learning; our results Pairs performed better than non-contrastive settings 73% of the show no single approach fits all models. The IC-Embedding time. The varying results across settings indicate that different prompting ranked the best for 2 models and CoT-SA ranked scenarios may benefit from different prompts. the best for 1. Q/A and CWE-list substantially improved the performance Summary for RQ1: Across two vulnerability detection of Basic queries for 8 out of 11 models. Additionally, IC-
Embedding and CoT-SA techniques were beneficial for some best for 7-8 models. Integrating Q/A and CWE-list were models; IC-Embedding slightly outperformed IC-Random on useful for improving model performance. IC-Embedding LLAMA 2 and GPT-4, while CoT-SA substantially outper- and CoT-SA performed best for 3-4 models. Contrastive formedotherapproachesonaverageonMagiCoder.Contrastive Pairs were useful for predicting unpaired data. Pairs performed better than non-contrastive settings in 45% of cases. Surprisingly, CoT-CVE did not perform very well, with CoT-CVE performing worse than the random baseline B. RQ2: How well do state-of-the-art LLMs perform on on 6 out of 11 models. Our results indicate that additional vulnerability detection? bug information can be useful but not always. This may be To explore the capabilities of LLMs for vulnerability due to irrelevant context [55] (e.g. the bug report contains detection, we compared the performance of the best prompting information irrelevant to code like versions of programs) and method for each model over 3 random seeds, shown in is corroborated by Sun et al. [58]. Future work could explore Figure 3a. The best-performing model, StarChat-β, reported alternative information such as bug reports and bug-fixing 0.56BalancedAccuracy,showingthatthecurrentSOTALLMs commits and different prompting methods that use the bug were not able to perform well for vulnerability detection. The information. performance of the models ranged from 0.56 to 0.50, close We evaluated the models on a dataset containing pairs to that of random guessing. We observe that the model face of vulnerable and fixed programs. This represents a use- challenges of distinguishing non-vulnerable and vulnerable case such as using LLMs to detect vulnerabilities, and then versions,and7outof11modelsmade75%ormorepredictions checking again for the vulnerability after applying a bug-fixing on the same class. Some models are severely biased towards patch. 
While this aspect is shared with other benchmarks one label, e.g., Mistral mostly predicts non-vulnerable, with based on bug reports, like Big-Vul [19] and CVEFixes [5], only three exceptions. it may not represent all settings of vulnerablity detection. To Compared to the HumanEval performance showed in Ta- represent an alternative scenario of scanning new procedures ble I, we see that high code generation performance did not for vulnerabilities, we also plotted results on a random sample imply high vulnerability detection performance. Despite their 5Metric Balanced Accuracy Precision Recall Random-guess baseline 1. StarChat-β (IC-Random) 2. GPT-4 (IC-EmbeddingContrastive) 3. StarCoder (IC-Random) 4. Mixtral-MoE (Basic) 5. MagiCoder (CoT-SA) 6. GPT-3.5 (IC-Random) 7. LLAMA2 (IC-Embedding) 8. WizardCoder (IC-RandomContrastive) 9. CodeLLAMA (Basic) 10. Mistral (Basic) 11. Gemini (Basic) 0.0 0.2 0.4 0.6 0.8 1.0 Performance )tpmorP(ledoM 0.56 1. GPT-4 0 0. .5 75 2 (Basic) 0.55 2. LLAMA2 0 0. .5 47 5 (IC-EmbeddingContrastive) 0.54 3. CodeLLAMA 0 0. .5 29 9 (Basic) 0.54 4. WizardCoder 0 0. .6 20 1 (Basic) 0.53 5. GPT-3.5 0 0. .6 19 2 (IC-Embedding) 0.53 6. MagiCoder 0 0. .5 28 1 (IC-RandomContrastive) 0.53 7. Mistral 0 0. .5 72 8 (Basic) 0.53 8. Mixtral-MoE 0 0. .5 62 1 (Basic) 0.52 9. StarChat-β 0 0. .5 72 9 (IC-Random) 0.52 10. StarCoder 0.62 0.10 (IC-EmbeddingContrastive) 0.50 11. Gemini 0.50 0.98 (CoT-SAContrastive) 0.0 0.2 0.4 0.6 0.8 1.0 Performance (A)Performance on the paired dataset. )tpmorP(ledoM 0.63 0.65 0.53 0.57 0.54 0.89 0.55 0.57 0.43 0.55 0.53 0.79 0.54 0.75 0.13 0.54 0.69 0.19 0.54 0.67 0.16 0.54 0.53 0.77 0.53 0.52 0.69 0.52 0.83 0.07 0.50 0.50 1.00 (B)Performance on the unpaired dataset. FIG.3.Best-case performance of each model. Bar length represents the mean of three random seeds, with error bars indicating one standard deviation. The prompt used for each model is displayed in parentheses. TABLE III. 
Further analysis on 50 pairs of vulnerable and high code generation performance measured on HumanEval, non-vulnerable examples. Cell values display the number and WizardCoder, Code LLAMA, and Gemini performed poorly percentage of pairs in each category on vulnerability detection; while StarChat-β, StarCoder, and MagiCoder performed relatively better. These results indicate Model Can’t Distinguish BothCorrect BothWrong that effective vulnerability detection may require models to StarChat-Beta 24(48%) 18(36%) 8(16%) possess skills beyond code generation and it’s not that the bigger GPT-4 36(72%) 9(18%) 5(10%) models always perform better (StarChat-β, StarCoder, and StarCoder 35(70%) 10(20%) 5(10%) MagiCoder only contain 7B parameters as shown in Table I). Mixtral-MoE 36(72%) 10(20%) 4 (8%) MagiCoder 42(84%) 6(12%) 2 (4%) GPT-4 and Mixtral-MoE maintained similar rankings in both GPT-3.5 40(80%) 7(14%) 3 (6%) tasks, displaying versatility across different contexts. LLAMA2 41(82%) 6(12%) 3 (6%) Table III presents a further analysis on the models’ capabil- WizardCoder 21(42%) 17(34%) 12(24%) CodeLLAMA 44(88%) 5(10%) 1 (2%)
ities of distinguishing pairs of vulnerable and nonvulnerable Mistral 47(94%) 3 (6%) 0 (0%) code. In the table, under Column Can’t Distinguish, we show Gemini 49(98%) 1 (2%) 0 (0%) that on average, 76% of pairs cannot be distinguished. Under Average 38(76%) 8(16%) 4 (8%) Both Correct and Both Wrong columns, our results indicate that the models can predict both versions correctly in some instances (16% of pairs), but there are also cases (8% of pairs) C. RQ3: What types of errors do LLMs make when explaining where the models can predict both versions incorrectly. vulnerabilities? Similar to the scenario in Figure 2, Figure 3b plots model 1) Study setup: For a vulnerability detection tool to be performanceon50unpairedexamples.Weseethatperformance useful, we hope it can present further vulnerability information improved slightly, but still ranged close to the random-guess such as its cause, types and locations. We prompted the LLMs baseline – from 0.5 to 0.63 Balanced Accuracy. The models to explain all predictions that they identified as vulnerable and rankeddifferentlyforthetwosettingsofvulnerabilitydetection. analyzed their responses. The prompt consists of three parts: In Figure 3b, the unpaired setting, GPT-4 performed the best. (1)theoriginalpromptdescribedinSectionIII-A,applyingthe best-case prompt for each model noted in Figure 3a, (2) the Summary for RQ2: The SOTA LLMs performed model’soriginalprediction,e.g.,“Yes/No”forbasicpromptsor poorlyatvulnerabilitydetection,withBalancedAccuracy “Yes, the vulnerability is CWE-XXX: Description” for prompts ranging from 0.5 to 0.63. All models frequently failed with CWE information, and (3) “Please explain the reasoning to distinguish the buggy and fixed versions of programs for this answer”. We manually inspected the results and found — in 76% of cases on average. that the models generally make four types of errors: 6Does the response contain an error? 
No Yes Noerrorobserved Codeunderstanding Hal/Mem/Rep Logicerror Commonknowledge 0 50 100 150 #Responses yrogetaCrorrE TABLE IV. Fine-grained errors across 287 responses to 100 programs. For select Code understanding errors related to specific code structures, the total number of responses 124(43%) containing those structures and the percentage with an error 118(41%) are shown. 31(11%) Errorcategory Error Count 27(9%) Codeunderstanding Bounds/NULLcheck 76/159(48%) 8(3%) Stringoperation 3/29(10%) Integermath 8/96 (8%) Pointeroperation 9/147 (6%) Alloc/Freeoperation 4/81 (5%) Indexoperation 1/60 (2%) FIG.4.Error categories observed in responses from all LLMs. Controlflow 11 Barwidthshowsthenumberoferrorsobservedineachcategory. Syntax 5 Bar labels show the percentage of responses containing each Datatype 1 category of error. Responses can contain more than one error. Total 118 Hal/Mem/Rep Hallucination 15 Memorization 11 a) Code understanding errors: LLMs produce incorrect Repetition 5 assertions about programming language semantics or code Total 31 structures, including inferences about the effects of statements. Logicerror Implication 14 For instance, sometimes an LLM incorrectly claims that a Inconsistent 13 pointer is accessed without being checked, when the code Total 27 contains the check; sometimes an LLM claims an integer Commonknowledge Total 8 operationwilloverflowwhen,consideringthesizeoftheinteger and possible values of the operation implied by the prior code FIG.5.Example of Missing Bounds/NULL Check. context, an overflow is impossible. b) Common knowledge errors: LLMs make incorrect Source code (Example 4/non-vulnerable) assumptions about the applications, environment, or code conventions.Forexample,anLLMmayclaimthatifavariable N_FDC is negative, an error will occur. 
However, a developer 1 static int changedline (const Proto *p, int (cid:44)→ oldpc, int newpc) { could reasonably interpret the all-capitalized N_FDC as a 2 if (p->lineinfo == NULL) /* no debug configuration variable representing the number of a certain (cid:44)→ information? */ resource (floppy-disk controllers), and assume it to be a non- 3 return 0; 4 while (oldpc++ < newpc) { negative integer by definition. 5 if (p->lineinfo[oldpc]!= 0) c) Logic errors: LLMs produce statements that are 6 return (luaG_getfuncline(p, oldpc - logically contradictory or make incorrect logical implications (cid:44)→ 1)!= luaG_getfuncline(p, newpc)); 7 } when reaching the conclusions of vulnerability. For example, 8 return 0; /* no line changes between sometimesanLLMcorrectlyclaimsthatavariablexischecked, (cid:44)→ positions */ then later contradicts itself by stating, “since x is not checked, 9 } it will overflow...”. d) Hallucination/Memorization/Repetition errors: LLMs Model response (StarChat-β) produce hallucination (stating information that sounds correct butisnotpresentintheprompt,suchaspointingoutapotential The function dereferences the ‘p->lineinfo‘ pointer integer overflow in an arithmetic operation that doesn’t appear withoutcheckingifitisnull.Thiscancauseacrashorother in the code), memorization (memorizing information from the unexpected behavior if the pointer is null... training data), and repetition (infinitely repeating certain text sequences, for instance: “1. The function is buggy because it is not checking for a null pointer. 2. The function is buggy because it is not checking for a null pointer. 3...”). (κ) [22], ranging from -1 to 1 with higher values indicating
We collected quantitative data by analyzing a total of 287 more agreement. We resolved disagreements by siding with eligible LLM responses to understand how often LLMs make the majority when two raters differed from the third, resulting such mistakes. To mitigate subjectivity, we used independent in κ=0.75 with 83% of responses classified identically. We ratings from three authors, similar to Islam et al. [30]. The then discussed the remaining disagreements until reaching a ratersindependentlyidentifiedtheerrorsintheLLMresponses, final categorization for each response. meeting to refine the protocol after processing 1⁄ 3, 1⁄ 2, and all of 2) RQ3 Results: Figure 4 reports how often each type thedata.Wemeasuredinter-rateragreementusingFleiss’kappa of error occurred in LLM responses. We found that LLMs 7FIG.6.Example of Incorrect Understanding of Integer Math. Prediction + Bug Type Prediction Prediction + Location Prediction + Cause Prediction + Bug Type + Location Source code (Example 21/non-vulnerable) StarCoder 1 void AllocateDataSet(cmsIT8* it8) { 2 TABLE* t = GetTable(it8); MagiCoder 3 if (t -> Data) return; // Already Mistral (cid:44)→ allocated 4 t-> nSamples = atoi(cmsIT8GetProperty(it8, Mixtral (cid:44)→ "NUMBER_OF_FIELDS")); 5 t-> nPatches = atoi(cmsIT8GetProperty(it8, CodeLLAMA (cid:44)→ "NUMBER_OF_SETS")); 6 if (t -> nSamples < 0 || t->nSamples > StarChat-β (cid:44)→ 0x7ffe || t->nPatches < 0 || (cid:44)→ t->nPatches > 0x7ffe) WizardCoder 7 SynError(it8, "AllocateDataSet: too much (cid:44)→ data"); LLAMA2 8 else { 9 t->Data = (char**)AllocChunk(it8, 0 25 50 75 100 (cid:44)→ ((cmsUInt32Number)t->nSamples + 1) * %Correct (cid:44)→ ((cmsUInt32Number)t->nPatches + 1) * (cid:44)→ sizeof(char*)); 10 if (t->Data == NULL) { 11 SynError(it8, "AllocateDataSet: Unable (cid:44)→ to allocate data array"); 12 } 13 } 14 } Model response (GPT-4) Both t->nSamples and t->nPatches are user- controlled and can be large. 
When these values are incre- mented by 1 and then multiplied by each other and further multiplied by sizeof(char*), the result can overflow a 32-bit integer if the values are sufficiently large... Before performing the allocation, the function checks if nSamples and nSamples are within a certain range (0 to 0x7ffe). However, this check does not prevent overflow. For example, values close to the upper limit can still cause themultiplicationresulttowraparound,leadingto asmaller allocation than required. were somewhat useful for explaining vulnerabilities as 43% responses contain no errors. Improving Code Understanding will help LLMs to better explain vulnerabilities, as 41% responsescontainedcodeunderstandingerrors.Ourresultsalso demonstrate that LLMs still cannot perfectly solve reasoning problems, as some of the incorrect code understanding is due to not being able to reason about the values across several statements, and there are 9% Logic errors and 3% Common Knowledgeerrors,whichcanaffectthereasoning.Weobserved that LLMs tended towards Hallucination, Memorization, and Repetition (11% of errors) during vulnerability detection. In Table IV, we show the specific code structures that affected the correctness of the explanations as well as the fine-grained classifications for Logic errors and Hallucina- tion/Memorization/Repetition errors. For example, out of 159 responses explaining bounds checks, 76 are incorrect. Figure 5 shows an example of a model response which flags ledoM 0 25 50 75 100 %Correct FIG. 7. Performance of the best prompting method/seed. Prediction displays binary prediction accuracy; bars with Bug Type, Location, and Cause display correct predictions for explaining a vulnerability. GPT-3.5, GPT-4, and Gemini each produced fewer than 5 eligible explanations each and are excluded. an unchecked null-pointer dereference, presumably at line 5 (p->lineinfo[oldpc]), even though p->lineinfo is checked at line 2. 
This error can lead to false positives, especially after the bug is fixed by adding a check. Figure 6 shows an example of an integer math error. From themodelresponse,theLLMcorrectlyidentifiedthatthereisa bounds-checkatline6(itwasinfactaddedbythedevelopersto preventoverflows[16]).Giventheupperboundof0x7ffffor nSamples+1 and nPatches+1, even the maximum values wouldnotcauseanoverflowinanunsignedinteger(0x7fff∗ 0x7fff∗8 = 0xfff80008) at line 9; however, the LLM was not able to understand and reason about such math. Figure7presentsthesecondpartoftheresultsofRQ3.Here, we provide a summary of the models’ capability of reporting the type, the location and the root cause of a vulnerability. The results show that although most LLMs correctly predicted up to 79% of vulnerable examples (blue bar), correctness dropped substantially when we consider the predicted location, bug type, and causes. Considering both bug types and locations (orange bar), all models dropped in correctness, decreasing by 18-67%,withMistralfailingonallexamples.Consideringroot- cause explanations (purple bar), model correctness decreased by 24-50%. 8TABLEV.LLMscorrectlylocated6outof27DbgBenchbugs. Human Time/Difficulty Correct LLM prediction Human performance reports the average perceived difficulty 15 andtimetodiagnosis,andthepercentageofhumanparticipants
which correctly localized each bug. 2 models failed to localize 10 any bugs (not shown), including GPT-4. Humanperformance 5 Human Diagnosis Difficulty Total6Bugs Model Correct Time(min.) Rating(1-5) 0 0 10 20 30 40 50 60 GPT-3.5 find.24bf33c0 71% 31 Moderate(3) DiagnosisTime(min) Mixtral-MoE Gemini grep.2be0c659 78% 29 Moderate(3) MagiCoder grep.c96b0f2c 80% 50 VeryDifficult(4) GPT-3.5 CodeLLAMA grep.3220317a 100% 42 VeryDifficult(4) Mixtral-MoE CodeLLAMA Gemini grep.5fa8c7c9 100% 26 Moderate(3) GPT-3.5 MagiCoder grep.55cf7b6a 100% 15 Slight(2) GPT-3.5 Summary for RQ3: 57% of LLM responses contained errors when explaining the cause of vulnerabilities. The most common error categories were “Code un- derstanding” (41% of responses, especially missing Bounds/NULL Checks) and “Hallucination/Memoriza- tion/Repetition” (11% of responses). All LLMs experi- enced a substantial (18-100%) drop in correctness when considering their predictions of bug location, type and causes. D. RQ4: How do LLMs compare to human developers for localizing vulnerabilities? 1) Study setup: We are interested in how LLMs’ perfor- mance compares to human’s. To set up the experiments, we used DbgBench [7], where the detailed human performance is reported. In their study [7], 12 professional software engineers were given the source code, bug reports, and test cases related to 27 bugs in the find and grep utilities, such as null- pointer dereferences, buffer overflows, resource leaks, and infinite loops. They reported a diagnosis with fault locations, rated their perception of the difficulty, and provided a patch. 
To match the human setting, we gave each LLM prompts containing the bug report, test case, the source code of the files containing the faults, and instructions "Given the above bug report, test case which reproduces the bug, and source code, provide the line numbers of all fault locations in the code in the following format: * file.c:line number." These prompts (averaging 19-25k tokens) exceeded the context length of 4 models, which we excluded. We extracted fault locations from the outputs of the remaining 7 LLMs and compared them with the correct fault locations from DbgBench, considering a prediction correct if at least one fault location matched the ground truth. 2) RQ4 Results: Table V shows that among the 7 models, 5 models produced correct output for 6 bugs and made a total of 11 correct predictions (some models predict correctly on the same bug). GPT-3.5 performed the best and correctly located 4 [Figure 8 axes: y-axis Count; x-axis Perceived Difficulty (Slight, Moderate, Very, Extreme).] FIG. 8. The distribution of time and difficulty reported by humans in DbgBench, and the portion of examples predicted correctly by at least one LLM. [Figure 9 legend: Human diagnosis; All models incorrect; Correct LLM Localizations: GPT-3.5; GPT-3.5 + Mixtral-MoE; Gemini + MagiCoder; Mixtral-MoE + Code LLAMA; GPT-3.5 + Gemini + Code LLAMA + MagiCoder. X-axis (Bug ID): grep.55cf7b6a, grep.5fa8c7c9, grep.c96b0f2c, grep.3220317a, grep.2be0c659, find.24bf33c0; y-axis: Match (%).] FIG. 9. Comparison of Human and LLM performance on all bugs in DbgBench. Bugs are ordered along the X-axis by human performance/diagnostic time. Y-axis indicates the percentage of humans or LLMs that correctly localized the bug. bugs. Interestingly, the newer model, GPT-4, did not match the ground-truth fault locations on any examples, and neither did Mistral. These bugs took humans 15 to 50 minutes to diagnose, with difficulty ratings ranging from slightly to very difficult (2-4 on a scale of 1 to 5).
Figure 8 shows where these bugs are located in the entire dataset in terms of perceived difficulty and diagnostic time given by DbgBench. In Figure 9, we visualize individual bugs and rank them based on the "easiness" of diagnosis for humans. To do so, we first rank the bugs based on the correctness achieved among the developers who diagnosed the bug, and to break ties, we compared the diagnosis time. Here, we can see that LLMs failed to correctly predict the examples which were the hardest for humans (rightmost dots), as well as many of the easiest examples (missing 4 out of 7 leftmost dots which were localized by 100% of humans). Overall, LLMs performed substantially worse than humans. Most of the correct predictions were among those already correctly diagnosed by 70-100% of human study participants. These findings suggest that current LLMs still have significant limitations in localizing software bugs in a real-world setting. Summary for RQ4: LLMs correctly located 6/27 bugs from DbgBench, while at least one human correctly located 27/27 bugs. GPT-3.5 performed the best and correctly located 4 bugs. GPT-4 and Mistral failed to locate any bugs correctly. [Right column, Section V continued:] labels, we would expect performance to be substantially reduced on the refactored code. We saw both improvements and reductions in performance and only small differences in Balanced Accuracy (0.02-0.09), even with extensive refactoring. As a pilot study, we tried using 2, 4, 6, or 8 in-context examples for the models which performed best with in-context prompts and found that 6-shots performed best for most models. Extended results for both experiments can be found in our data package [4]. VI. RELATED WORK LLMs [47, 59, 44, 2, 62, 38, 31, 32, 66] have been applied in various software engineering tasks, such as code genera-
[Right column, Related Work continued:] tion [12], test generation [35], and code summarization [3] through the use of prompting. Effective prompts can condition the model for a specific task like Question/Answer [9], provide additional information through in-context examples [36] and elicit step-by-step reasoning through chain-of-thought examples [65, 34]. We have surveyed these state-of-the-art prompting methods and applied them to the vulnerability detection task. Recent studies have initiated investigation into the usage of LLMs for vulnerability detection, using well-known techniques such as zero-shot prompting [50, 23], in-context learning [24, 42, 11], and fine-tuning [54, 70, 68]. Several papers have utilized chain-of-thoughts (CoT), such as "Let's think step-by-step" [37, 20, 57], multi-step prompts [63, 69], and generic information such as CFG, DFG, PDG, and API calls [71, 46, 33, 63]. [Left column:] IV. THREATS TO VALIDITY Better prompts than those we evaluated may exist. To draw out the best model performance, we systematically explored several prominent prompting techniques and introduced three new approaches motivated by our domain knowledge (Section III-A). Model performance can be reduced by irrelevant context [55], or the order [43] and quality [39] of in-context examples. To alleviate these potential issues, we reported the best-case performance among various prompt orderings and varying amounts and sources of context, and tried different number of shots (Section V). We queried 100 functions due to the cost of LLMs (see Section II-A), which may seem limited. However, we note that our dataset originates from 36 open-source projects with diverse applications, including databases, servers, and interpreters. The
[Left column, Threats to Validity continued:] vulnerabilities represent important issues, all featuring in the top-25 most dangerous CWEs for the past five years [14], and were manually curated to ensure quality [17] and focus on real-world use cases [26]. We performed a pilot study which suggests that the models did not memorize the labels (Section V). To mitigate potential biases from the experimental setup and metrics, we evaluated the LLMs in two distinct scenarios: "paired" and "unpaired" (Section III-B), and aggregated results across 3 different random seeds. We justify our choice of metrics in Section II-D. The analysis of the LLMs errors can involve subjective judgments. To ensure confident classifications, we rigorously designed the categories to be unambiguous and confirmed agreement among multiple raters (Section III-C). Our fault localization setting for DbgBench does not perfectly match the human debugging scenario, as the LLMs could not run tests or use debuggers. [Right column, Related Work continued:] In this work, we propose three novel prompt approaches tailored for vulnerability detection, integrating information from bug-fix commits (contrastive pairs), CVE descriptions (CoT-CVE), and static analysis reports (CoT-SA). We reported the capabilities of LLMs to distinguish buggy and patched versions of code. Several concurrent papers have taken initial steps to analyze errors in LLM-generated vulnerability detection responses (to the best of our knowledge, the papers are in arXiv but not yet published). Yu et al. [69] analyzed 82 responses and identified problems based on a taxonomy of errors in ChatGPT responses to StackOverflow and general-purpose queries, corroborating our findings that the models struggled with correctness, logic and consistency. Nong et al. [46] categorized 100 incorrect predictions from GPT-3.5 into Insufficient Context, CWE Oblivion, Incomplete Control Flow/Data Flow; similarly, Sun et al. [58] used an LLM guided by 300 human annotations to categorize incorrect predictions into Wrong Reasoning, Need
While such tools could aid LLMs Detailed Analysis, Need Other Code, and Other. Ullah et al. in making a diagnosis, we focused on a direct use of LLMs in [63] automatically compared GPT-4’s summarization of LLM thisstudy.Investigatinglong-contextintegrationandinteractive reasoning with human-generated reasoning based on BLEU, agent environments is an interesting direction for future work. ROUGE, and GPT-4. Ourerrorclassificationsprovidemorefine-grainedcategories V. DISCUSSIONS thanpriorwork(seeTableIV)andallowustoanalyzethecode Because SVEN is open-source, the labels may be inside the structures which affected the correctness of explanations, and training dataset of recent LLMs. To test if LLMs memorized thuspotentiallyprovidebetterguidanceforfutureimprovement vulnerabilitylabels,werefactoredthecode(usingNatGen[10]) of LLMs for vulnerability detection. We provide a quantitative while preserving the vulnerability labels and evaluated the comparison of LLMs and human’s performance on locating models. If the models simply memorized the vulnerability bugs. To the best of our knowledge, our work is the most 10comprehensive: compared to prior works, we analyzed more detection in code at edittime: Zero-shot, few-shot, or models(11),andusedmultipleraterswitharigorousagreement fine-tuning? arXiv:2306.01754, 2023. protocol based on Fleiss’ κ to manually annotate a larger set [12] Mark Chen, Jerry Tworek, Heewoo Jun, et al. Evaluating of 287 LLM responses. largelanguagemodelstrainedoncode. arXiv:2107.03374, 2021. VII. CONCLUSIONS [13] Wei-Lin Chiang, Zhuohan Li, Zi Lin, et al. Vicuna: An open-source chatbot impressing gpt-4 with 90%* chatgpt Wecomprehensivelyevaluatedthevulnerabilitydetectionca- quality, March 2023. URL https://lmsys.org/blog/2023-
pabilities of LLMs from the standpoint of their performance as 03-30-vicuna/. wellastheirerrors.Wefoundthattheyperformedonlyslightly [14] The MITRE Corporation. Stubborn Weaknesses in the better than random guessing (0.5-0.63 Balanced Accuracy), CWE Top 25, 2023. URL https://cwe.mitre.org/top25/ and that they struggled to differentiate buggy and fixed code archive/2023/2023 stubborn weaknesses.html. versions, making the same prediction for 76% of pairs. We [15] The MITRE Corporation. CVE-2017-9211, 2024. URL proposedCoTfromStaticAnalysisandContrastivepairs,which https://nvd.nist.gov/vuln/detail/CVE-2017-9211. improved the performance of some models. LLMs frequently [16] The MITRE Corporation. CVE-2018-16435, 2024. URL madeCodeUnderstanding,CommonKnowledge,Hallucination, https://nvd.nist.gov/vuln/detail/CVE-2018-16435. and Logic errors when explaining vulnerabilities, with 57% [17] RolandCroft,MAliBabar,andMMehdiKholoosi. Data of responses containing errors. On complex debugging tasks quality for software vulnerability datasets. In ICSE, 2023. from DbgBench, LLMs performed far worse than humans, [18] Facebook. Infer Static Analyzer, 2024. URL https:// correctly localizing only 6/27 bugs. These findings highlight fbinfer.com/. LLMs’ limitations in vulnerability detection and our dataset [19] Jiahao Fan, Yi Li, Shaohua Wang, and Tien N. Nguyen. of LLM errors provides guidance for future improvement of A C/C++ Code Vulnerability Dataset with Code Changes LLM-based vulnerability detection. and CVE Summaries. In MSR, 2020. [20] Sidong Feng and Chunyang Chen. Prompting is all you REFERENCES need: Automated android bug replay with large language [1] Bard announcement, February 2023. URL https://blog. models. In ICSE, 2024. google/technology/ai/bard-google-ai-search-updates/. [21] Zhangyin Feng, Daya Guo, Duyu Tang, et al. Code- [2] gpt-3.5-turbo-0613announcement, June2023. 
URLhttps: BERT: A pre-trained model for programming and natural //community.openai.com/t/gpt-3-5-turbo-0613-function- languages. arXiv:2002.08155, 2020. calling-16k-context-window-and-lower-prices/263263. [22] Joseph L. Fleiss. Measuring nominal scale agreement [3] ToufiqueAhmed,KunalSureshPai,PremkumarDevanbu, among many raters. Psychological Bulletin, 1971. and Earl T. Barr. Automatic semantic augmentation of [23] MichaelFu,ChakkritTantithamthavorn,VanNguyen,and language model prompts (for code summarization). In Trung Le. ChatGPT for vulnerability detection, classifi- ICSE, 2024. cation, and repair: How far are we? arXiv:2310.09810, [4] Anonymous. Data package, 2024. https://figshare.com/s/ 2023. 78fe02e56e09ec49300b. [24] Zeyu Gao, Hao Wang, Yuchen Zhou, Wenyu Zhu, and [5] Guru Bhandari, Amara Naseer, and Leon Moonen. CVE- Chao Zhang. How far have we gone in vulnerability fixes: Automated Collection of Vulnerabilities and Their detection using large language models, 2023. Fixes from Open-source Software. In PROMISE, 2021. [25] GitHub. CodeQL Static Analyzer, 2024. URL https: [6] Stella Biderman, Hailey Schoelkopf, Quentin Gregory //codeql.github.com/. Anthony, et al. Pythia: A suite for analyzing large [26] Jingxuan He and Martin Vechev. Large language models language models across training and scaling. In ICML, for code: Security hardening and adversarial testing. In 2023. CCS, 2023. [7] MarcelBo¨hme,EzekielOlamideSoremekun,etal. Where [27] Ashish Hooda, Mihai Christodorescu, Miltos Allamanis, is the bug and how is it fixed? an experiment with et al. Do large code models understand programming practitioners. In ESEC/FSE, 2017. concepts?ablack-boxapproach. arXiv:2402.05980,2024. [8] Kay Henning Brodersen, Cheng Soon Ong, Klaas Enno [28] JieHuangandKevinChen-ChuanChang.Towardsreason- Stephan, and Joachim M. Buhmann. The Balanced inginlargelanguagemodels:Asurvey.arXiv:2212.10403, Accuracy and Its Posterior Distribution. In ICPR, 2010. 2022. 
[9] TomBrown,BenjaminMann,NickRyder,etal.Language [29] HuggingFace. Generation with LLMs. URL https:// models are few-shot learners. NeurIPS, 2020. huggingface.co/docs/transformers/en/llm tutorial. [10] Saikat Chakraborty, Toufique Ahmed, Yangruibo Ding, [30] Md Johirul Islam, Giang Nguyen, Rangeet Pan, and et al. NatGen: generative pre-training by “naturalizing” Hridesh Rajan. A comprehensive study on deep learning source code. In ESEC/FSE, 2022. bug characteristics. In ESEC/FSE, 2019. [11] Aaron Chan, Anant Kharkar, and Roshanak Zilouch- [31] Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, ian others Moghaddam. Transformer-based vulnerability et al. Mistral 7b. arXiv:2310.06825, 2023. 11[32] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, [50] M.Purba,A.Ghosh,B.J.Radford,andB.Chu. Software et al. Mixtral of experts. arXiv:2401.04088, 2024. vulnerability detection using large language models. In [33] Avishree Khare, Saikat Dutta, Ziyang Li, Alaia Solko- ISSREW, 2023. Breslin, Rajeev Alur, and Mayur Naik. Understanding [51] Nils Reimers and Iryna Gurevych. Sentence-BERT:
the effectiveness of large language models in detecting Sentence embeddings using siamese bert-networks. security vulnerabilities. arXiv:2311.16169, 2023. arXiv:1908.10084, 2019. [34] TakeshiKojima,ShixiangShaneGu,MachelReid,Yutaka [52] Baptiste Roziere, Jonas Gehring, Fabian Gloeckle, Matsuo, and Yusuke Iwasawa. Large language models et al. Code Llama: Open foundation models for code. are zero-shot reasoners. NeurIPS, 2022. arXiv:2308.12950, 2023. [35] Caroline Lemieux, Jeevana Priya Inala, Shuvendu K. [53] SecureSoftware. RATS - Rough Auditing Tool for Lahiri,andSiddharthaSen.Codamosa:Escapingcoverage Security, 2001. URL https://code.google.com/archive/ plateausintestgenerationwithpre-trainedlargelanguage p/rough-auditing-tool-for-security/. models. In ICSE, 2023. [54] Alexey Shestov, Anton Cheshkov, Rodion Levichev, [36] Patrick Lewis, Ethan Perez, Aleksandra Piktus, et al. et al. Finetuning large language models for vulnerability Retrieval-augmented generation for knowledge-intensive detection. arXiv:2401.17010, 2024. nlp tasks. NeurIPS, 2020. [55] Freda Shi, Xinyun Chen, Kanishka Misra, et al. Large [37] Haonan Li, Yu Hao, Yizhuo Zhai, and Zhiyun Qian. The language models can be easily distracted by irrelevant hitchhiker’s guide to program analysis: A journey with context. In ICML, 2023. large language models. arXiv:2308.00245, 2023. [56] SonarSourceSA. SonarQubeStaticAnalyzer,2024. URL [38] Raymond Li, Loubna Ben Allal, Yangtian Zi, et al. https://www.sonarsource.com/products/sonarqube/. Starcoder:maythesourcebewithyou! arXiv:2305.06161, [57] Yuqiang Sun, Daoyuan Wu, Yue Xue, et al. GPTScan: 2023. Detecting logic vulnerabilities in smart contracts by [39] Jiachang Liu, Dinghan Shen, Yizhe Zhang, et al. What combining gpt with program analysis. In ICSE, 2024. makes good in-context examples for GPT-3? In DeeLIO, [58] Yuqiang Sun, Daoyuan Wu, Yue Xue, et al. LLM4Vuln: 2022. 
A unified evaluation framework for decoupling and en- [40] Pengfei Liu, Weizhe Yuan, Jinlan Fu, et al. Pre-train, hancing llms’ vulnerability reasoning. arXiv:2401.16185, prompt, and predict: A systematic survey of prompting 2024. methodsinnaturallanguageprocessing. ACMComputing [59] Gemini Team. Gemini: A family of highly capable Surveys, 2021. multimodal models. arXiv:2312.11805, 2023. [41] Xiao Liu, Hao Yu, Hanchen Zhang, et al. Agentbench: [60] Hugo Touvron, Thibaut Lavril, Gautier Izacard, et al. Evaluating llms as agents. arXiv:2308.03688, 2023. LLaMA: Open and efficient foundation language models. [42] Zhihong Liu, Qing Liao, Wenchao Gu, and Cuiyun Gao. arXiv:2302.13971, 2023. Software vulnerability detection with GPT and in-context [61] Hugo Touvron, Louis Martin, Kevin Stone, et al. learning. In IEEE DSC, 2023. Llama 2: Open foundation and fine-tuned chat models. [43] Yao Lu, Max Bartolo, Alastair Moore, Sebastian Riedel, arXiv:2307.09288, 2023. and Pontus Stenetorp. Fantastically ordered prompts and [62] Lewis Tunstall, Nathan Lambert, Nazneen Rajani, et al. where to find them: Overcoming few-shot prompt order Creating a coding assistant with starcoder. Hugging Face sensitivity. In ACL, May 2022. Blog, 2023. URL https://huggingface.co/blog/starchat. [44] Ziyang Luo, Can Xu, Pu Zhao, Qingfeng Sun, Xi- [63] SaadUllah,MingjiHan,SaurabhPujar,HammondPearce, ubo Geng, Wenxiang Hu, Chongyang Tao, Jing Ma, AyseCoskun,andGianlucaStringhini.Canlargelanguage Qingwei Lin, and Daxin Jiang. WizardCoder: Empow- models identify and reason about security vulnerabilities? ering code large language models with evol-instruct. not yet. arXiv:2312.12575, 2023. arXiv:2306.08568, 2023. [64] Laura Wartschinski, Yannic Noller, Thomas Vogel, et al. [45] Georgios Nikitopoulos, Konstantina Dritsa, Panos Louri- VUDENC: vulnerability detection with deep learning on das, et al. CrossVul: A Cross-Language Vulnerability a natural codebase for python. 
Information and Software Dataset with Commit Data. In ESEC/FSE, 2021. Technology, 2022. [46] Yu Nong, Mohammed Aldeen, Long Cheng, Hongxin [65] Jason Wei, Xuezhi Wang, Dale Schuurmans, et al. Chain- Hu, Feng Chen, and Haipeng Cai. Chain-of-Thought of-Thought prompting elicits reasoning in large language prompting of large language models for discovering and models. NeurIPS, 2022. fixing software vulnerabilities. arXiv:2402.17230, 2024. [66] Yuxiang Wei, Zhe Wang, Jiawei Liu, Yifeng Ding, and [47] OpenAI. GPT-4 technical report, 2024. LingmingZhang. Magicoder:Sourcecodeisallyouneed. [48] OpenAI. Embeddings documentation, 2024. URL https: arXiv preprint arXiv:2312.02120, 2023. //platform.openai.com/docs/guides/embeddings. [67] David Wheeler. Flawfinder, 2001. URL https://dwheeler. [49] PapersWithCode. HumanEval Benchmark, 2023. com/flawfinder/. URL https://paperswithcode.com/sota/code-generation- [68] Aidan ZH Yang, Claire Le Goues, Ruben Martins, and on-humaneval. [Accessed 27-10-2023]. Vincent Hellendoorn. Large language models for test-free 12fault localization. In ICSE, 2024. [69] Jiaxin Yu, Peng Liang, Yujia Fu, et al. Security Code
Review by LLMs: A Deep Dive into Responses, 2024. [70] Imam Nur Bani Yusuf and Lingxiao Jiang. Your instruc- tions are not always helpful: Assessing the efficacy of instructionfine-tuningforsoftwarevulnerabilitydetection. arXiv:2401.07466, 2024. [71] Chenyuan Zhang, Hao Liu, Jiutian Zeng, et al. Prompt- enhanced software vulnerability detection using chatgpt. arXiv:2308.12697, 2023. [72] Wayne Xin Zhao, Kun Zhou, Junyi Li, et al. A survey of large language models. arXiv:2303.18223, 2023. [73] Yunhui Zheng, Saurabh Pujar, Burn Lewis, et al. D2A: A dataset built for ai-based vulnerability detection methods using differential analysis. In ICSE-SEIP, 2021. 13
2403.18403 FoC: Figure out the Cryptographic Functions in Stripped Binaries with LLMs Guoqiang Chen∗‡, Xiuwei Shang∗‡, Shaoyin Cheng†‡§, Yanming Zhang∗‡, Weiming Zhang†‡§, and Nenghai Yu†‡§ ∗{ch3nye, shangxw, azesinter}@mail.ustc.edu.cn, †{sycheng, zhangwm, ynh}@ustc.edu.cn ‡ University of Science and Technology of China, Hefei, China § Anhui Province Key Laboratory of Digital Security, Hefei, China Abstract—Analyzing the behavior of cryptographic functions preliminarysuccesses.Inaddition,theLargeLanguageModels in stripped binaries is a challenging but essential task. Crypto- (LLMs) in the source-code domain, such as Codex [4], GPT-J graphic algorithms exhibit greater logical complexity compared [5], and GPT-NeoX [6], have demonstrated impressive code to typical code, yet their analysis is unavoidable in areas such comprehension [7]. It is promising to leverage this capabil- as virus analysis and legacy code inspection. Existing methods often rely on data or structural pattern matching, leading to ity to analyze binary code, especially the hard analysis on suboptimal generalizability and suffering from manual work. cryptographic functions, to provide comprehensible semantic In this paper, we propose a novel framework called FoC to information to participants. Currently, there is no such study Figure out the Cryptographic functions in stripped binaries. or available dataset. In FoC, we first build a binary large language model (FoC- However, the prediction of generative language models BinLLM)tosummarizethesemanticsofcryptographicfunctions innaturallanguage.ThepredictionofFoC-BinLLMisinsensitive is inherently powerless to reflect minor changes in binary to minor changes, such as vulnerability patches. To mitigate code. 
It is an essential ability to distinguish between two it, we further build a binary code similarity model (FoC-Sim) similardata,especiallytorecognizethepatchedandvulnerable upontheFoC-BinLLMtocreatechange-sensitiverepresentations cryptographic functions. Methods [8], [9], [10], [11], [12], and use it to retrieve similar implementations of unknown [13] designed for Binary code similarity detection (BCSD) cryptographic functions in a database. In addition, we construct a cryptographic binary dataset for evaluation and to facilitate generate embeddings of binary function, which could be very further research in this domain. And an automated method is sensitivetoanychangesinthecode.Therefore,thesemethods devised to create semantic labels for extensive binary functions. havethepotentialtocompensateforthepreviouslymentioned Evaluation results demonstrate that FoC-BinLLM outperforms weakness. While these methods have demonstrated promising ChatGPT by 14.61% on the ROUGE-L score. FoC-Sim outper- results ongeneral datasets, limitedattention has beengiven to forms the previous best methods with a 52% higher Recall@1. Furthermore, our method also shows practical ability in virus the domain of cryptographic binary. analysis and 1-day vulnerability detection. There are also some methods specifically designed to IndexTerms—BinaryCodeSummarization,CryptographicAl- provide semantic information about cryptographic algorithms gorithm Identification, Binary Code Similarity Detection, Large present in binary code. Existing methods for cryptographic Language Model algorithm identification, which provides the primitive classes contained within binaries, include approaches based on con- I. INTRODUCTION stants[14],[15],[16],statistics[17],[18],[19],[20],structures Cryptography plays a crucial role in computer security. [21], [22], [23], [24], [25], [26], and more. 
They use static Analyzingthecryptography-relatedcodeinstrippedbinariesis or dynamic analysis to identify distinct patterns to detect common and important in software reverse engineering, such cryptographic implementations within binaries. Various fac- asanalyzingviruseswithencryptioncapabilities,checkingfor tors affecting binary code (e.g., hidden constant features and weak cryptographic implementations in legacy software, and compilationoptimizations)canunderminetheeffectivenessof verifying privacy encryption compliance. Both the complex these methods. Although these classes is a kind of semantics logicofcryptographicalgorithmsandtheabsenceofsymbolic but carry less information compared to function-level sum- informationinstrippedbinariesexacerbatethedifficultyofun- maries. derstanding their binary code. Although modern decompilers In this paper, we first construct a cryptographic binary (e.g., IDA Pro [1]) can heuristically convert binary code into dataset with popular libraries and employ automated methods C-like pseudo-code, it remains challenging without human- to create semantic labels for large-scale binary code. To readable semantic descriptions. Figure out what the Cryptographic binary functions do, we Just as generating documentation for source code enhances propose our framework called FoC, which comprises two maintainability, it is also a way to generate semantic sum- components: (1) FoC-BinLLM is a generative model for maries for binary code to improve the efficiency of analysis. summarizing binary code semantics, where we employ multi- A few methods [2], [3] proposed recently utilize language task and frozen-decoder training strategies, and (2) FoC-Sim models in binary code summarization and have achieved is a similarity model built upon the FoC-BinLLM, where we 4202 raM 72 ]RC.sc[ 1v30481.3042:viXraidentify cryptographic features and use multi-feature fusion cryptographic binary dataset is a laborious task. There are
to train an advanced similarity model. In our experiments, dozens of cryptographic algorithms currently in the public FoC-BinLLMshowsunprecedentedperformanceandprovides domain,andacompletecollectionofimplementationsofthese detailed semantics in natural language, which is beyond the algorithms is challenging. In short, the absence of a publicly reachofpreviousmethods.FoC-Simalsooutperformsexisting available dataset prevents research on the current issue. methodsontheBCSDtasks.Inaddition,FoCshowspromising C2: Variety of Cryptographic Implementation. We need to results in analyzing cryptographic viruses and identifying collect as many implementations of cryptographic algorithms vulnerable cryptographic implementations in the real-world as possible. Since these implementations may have reference firmware. We release the code of FoC and the dataset we todifferentstandardsandprotocolsaswellasbedesignedfor collected1. Our contributions can be summarized as follows: different platforms and purposes. In addition, developers can • We construct a cryptographic binary dataset cross- introduce differences in the same algorithm specification due compiled from popular open-source repositories written toprogrammingstyle.Forexample,thedeveloperstendtosplit in C language, and we devise an automated method a cryptographic algorithm containing complex operations into to create semantic labels for extensive binary functions. multiple functions or combine multiple simple operations into Ourdiscriminatorguaranteesastrongalignmentbetween a single function, which can invalidate some control-flow or these labels and facts on cryptography-related semantics. data-flow based analysis methods. Meanwhile, we cannot ig- • We propose an LLM-based framework FoC to analyze nore the differences introduced by compilation environments, the cryptographic functions in stripped binaries. 
To our such as optimizations (e.g., loop unrolling, function inlining, knowledge,FoC-BinLLMisthefirstgenerativemodelfor etc.), as well as the architectures. cryptographic binary analysis, summarizing the behav- C3:Well-builtSemanticLabels.First,comprehendingsource ioral semantics of binary functions in natural language. code is not straightforward for humans and therefore it does Fusing code semantics provided by FoC-BinLLM, struc- not meet the requirement of being easy-to-understand. While ture information, and cryptographic features, we further function names and primitive classes are too short to carry buildFoC-Simforretrievinghomologousfunctionsinour moredetails.Second,thesource-codecommentsthatdescribe cryptographic binary database. the functionality well are often absent in the real-world • Experiments show that FoC-BinLLM outperforms Chat- projects. Finally, dealing with a dataset consisting of millions GPT by 14.61% on ROUGE-L score in summarizing of binary functions, the enormous workload makes manual functions from cryptographic binaries. FoC-Sim outper- annotation impossible. forms the previous best methods with a 52% higher Recall@1 in retrieving similar cryptographic functions. C4: Cross-version Awareness. Given the centrality of cryp- FoCalsoshowspromisingoutcomesinanalyzingcrypto- tographic algorithm libraries to computer security and their graphic viruses and identifying vulnerable cryptographic widespreaduse,anyvulnerabilityintheimplementationcould implementations in the real-world firmware. cause unacceptable damage, such as the Heartbleed Vulnera- bility [27]. The fixing of a program vulnerability is often a II. BACKGROUNDANDMOTIVATION tiny change. We thus expect our method to be aware of such A. 
Problem Definition smallsemanticdifferencesacrossversions.Unfortunately,itis To figure out the cryptographic function in stripped bina- difficult for generative models to reflect these differences in ries, we expect to obtain a comprehensive summary and a their prediction results. embedding representation. Formally, we consider our method, C. Related Works denotedasf,capableofanalyzingacryptographicfunctionF in a binary file B. Our objective is to generate a summary in Binary Code Summarization. It has been proposed only natural language denoted as E elucidating the behavior of F recently. BinT5 [2] and HexT5 [3] are two efforts that focus for the analyst, and a embedding denoted as V for reflecting on this issue. BinT5 is built upon CodeT5 [28] to summarize difference between functions. This process can be formalized decompiled code. HexT5 is a unified pre-training model for as: binary code information inference tasks, including decom- E, V =f(F,B) (1) piled pseudo-code summarization. Both of them are designed for general binary code rather than cryptography domain, To build the method f, we are mainly facing the following therefore, we take them as very basic baselines. In addition, challenges. general LLMs also could be used to generate summaries for B. Challenges binary code with an appropriate prompt. We also conduct a comparison with them. C1: Cryptographic Binary Dataset. A public dataset is CryptographicAlgorithmIdentification.Ithasbeenstudied necessary to evaluate methods for cryptographic function for more than two decades [32]. Many methods based on analysis in stripped binaries. At present, no such datasets program structure have been proposed, especially the data- have been published in previous work. Collecting such a flow graph (DFG). Aligot [24] identifies the data-flow loops 1https://github.com/Ch3nYe/FoC in the execution trace. CryptoHunt [25] and Wherescrypto 2TABLEI Qemu COMPARISONWITHRELATEDWORKSINADDRESSINGCHALLENGES. 
OpenSSL 2014 2014 2020 2023 Method Sp. C1 C2 C3 C4 Method Sp. C1 C2 C3 C4 OpenSSL_1.x OpenSSL_1.0.1g OpenSSL_1.1.1 OpenSSL_3.0.2
BinT5[2] N. ✗ ✗ ✓ ✗ findcrypt-yara[14] Y. ✗ ✗ ✗ ✗ BoringSSL LibreSSL TongSuo HexT5[3] N. ✗ ✗ ✓ ✗ CryptoKnight[29] Y. ✓ ✗ ✗ ✗ Android GeneralLLMs N. ✗ ✗ ✓ ✗ GENDA[30] Y. ✓ ✓✗ ✗ ✗ Linux Aligot[24] Y. ✗ ✗ ✗ ✗ PalmTree[11] N. ✗ ✗ ✗ ✓ CryptoHunt[25] Y. ✓ ✓✗ ✗ ✗ Trex[12] N. ✗ ✗ ✗ ✓ LibTomCryptNettleMbedTLSLibbcrypt LibgcryptTEAWolfSSLLibsodiumGmSSLTiny-AES-c Wherescrypto[26] Y. ✗ ✗ ✗ ✗ jTrans[13] N. ✗ ✗ ✗ ✓ FindCrypt2[31] Y. ✗ ✗ ✗ ✗ FoC Y. ✓ ✓ ✓ ✓ Fig. 1. Development of open-source cryptography repositories we investi- Signsrch[15] Y. ✗ ✗ ✗ ✗ - - - - - - gated.Thetraindatahighlightedinblue,thetestdatahighlightedinred. Sp.meansitisspeciallydesignedforcryptogrphicbinaryanalysis. Therefore,usingthedataset,wedevelopabinaryLLMtopre- dictcomprehensivedescriptionsforcryptographicfunctionsin stripped binaries, which potentially addresses C3. To mitigate [26] build DFGs with symbolic execution. These methods the weakness of the generative model against C4, we further are based on the manual design of graph patterns for known build a BCSD module based on our binary LLM to create the implementations, and they cannot overcome the C2. It means cross-version aware embedding representation. that these methods are not robust to any factors that make the binary code change. III. DATASETCONSTRUCTION Some cryptographic algorithms contain noticeable features, Dataset Collection. We here collect a cryptographic binary such as constants (e.g., S-box) and statistical attributes. Find- dataset for building our methods, as well as pushing further Crypt2 [31], Signsrch [15], and findcrypt-yara [14] are three research on this issue. We aim to study the cryptographic al- populartoolsthatidentifythecryptographicalgorithmspresent gorithm libraries, popular cryptographic implementations, and in binary files based on constant features. 
These methods will cryptographic modules in large projects, which are written in fail when facing intentionally altered implementations or algorithms Clanguage.Firstly,weconductedareviewofthedevelopment where constant values do not exist. Coarse-grained results at ofexistingcryptographicprojectsandtheirinter-dependencies. the file level (i.e., algorithm classes) fail on C3. As shown in Figure 1 (a), OpenSSL, one of the most popular CryptoKnight [29] and GENDA [30] respectively use CNN cryptographic algorithm libraries, has influenced the devel- and GNN to learn function semantics to predict primitive opment of other libraries (e.g., BoringSSL, LibreSSL, and classes. However, CryptoKnight uses a dataset almost ex- TongSuo) and has been applied in many large projects (e.g., clusively from OpenSSL [33], and GENDA collects its dataset Linux,Android,andQemu).Othercryptographicprojectshave from only four cryptographic algorithm libraries. They do not their own unique development histories, and some of them address the data diversity well, failing on C2. aredesignedforspecificscenarios.Forexample,wolfSSLand Binary Code Similarity Detection. BCSD methods aim to MbedTLSarefriendlytoembeddeddevices,whileBoringSSL compare the degree of similarity between two binary code and TongSuo are forked from OpenSSL by enterprises and snippets, and have the potential to overcome C4. Recently, continue to evolve to meet business requirements. However, the three advanced methods employ a Transformer encoder as for those independently developed projects, it is challenging their backbones, and have designed their own pre-training to ascertain whether they have been influenced by each other. tasks. 
Trex[12] uses value prediction in micro-trace to learn Wethusconductedastatisticalanalysisofcodeoverlapamong theexecutionsemantics.PalmTree[11]utilizescontextwindow them, focusing only on the code that would be compiled into prediction and def-use prediction to learn assembly code theirbinaryfiles.Theresultsindicatethatonlyprojectsforked from CFGs and DFGs. Meanwhile, jTrans[13] learns jump-aware from OpenSSL share some similar functions. semanticsthroughjumptarget predictionpre-training.Wecan As shown in Figure 2, we employ two compilers, GCC- employthemfordetectingcryptographicfunctionsinbinaries, 11.2.0andClang-13.0,withfourdifferentoptimizationoptions although they are not designed for this domain. (O0-3). These projects were compiled for six different target As shown in Table I, previous works have not adequately architectures:x86 32,x86 64,arm 32,arm 64,mips 32,and addressed the challenges mentioned above. mips 64. Subsequently, we strip the binaries to make them consistent with release versions in real-world scenarios. IDA D. Large Language Model and Our Motivation Pro was used to decompile the binary code. We performed Since the LLMs already show powerful understanding ca- deduplication on all functions in our dataset by the MD5 pabilitiesinbothnaturallanguage(NL)andprogramlanguage digest of binary functions. Our dataset contains considerable (PL),ithasbecomepossibletodeviseanautomatedpipelineto cryptographicalgorithmsandtakesaccountofthecompilation createhigh-qualitysemanticdescriptionsforthecryptographic environment, which allows us to overcome C1 and C2. The function in source code. It mitigates the challenge of creating statistical information of the dataset is shown in Table II. semantic labels (C3). We thus can confidently gather a rich Semantic Labels Creation. A comment in the source code cryptographic binary dataset to address C1 and C2. is a comprehensive semantic summary. Unfortunately, not
A recent evaluation [34] shows that LLMs perform signif- all functions have developer-written comments, we detected icantly worse on summarizing binary code than source code. commentsinlessthan20%ofthefunctionswithinourdataset. 3Dwarf Debug Information OffsetAddress TABLEIII CATEGORIESANDCLASSESEMPLOYEDBYTHEDISCRIMINATOR. Cross Compilation Strip Decompilation Category Class Source Binary W/ Stripped Binary Code Debug Information Binary Code 3des,aes,aria,blake2,blowfish,camellia,cast, chacha20,cmac,curve25519,curve448,des,dh, Metadata dsa,ecc,ecdh,ecdsa,ecjpake,ed448,ed25519, Build Prompt CryptographicPrimitiveClass hmac,idea,md4,md5,mdc2,poly1305,rc2,rc4, ripemd160,rsa,salsa20,sha1,sha224,sha256, Collector ChatGPT Discriminator Summary sha384,sha512,sha3,siphash,sm2,sm3,sm4, tea,umac,whirlpool,xtea SYSTEM_PROMPT = '''Imagine you are an experienced software developer. The user will provide a source code function and its basic BlockEncryptionMode cbc,pcbc,cfb,ctr,ecb,ofb,ocf,xts information each time. Your task is to generate a comment to the function. Please follow the rules below: AuthenticatedEncryptionMode ccm,gcm,sgcm,cwc,eax,ocb,siv,iapm 1. Comment should be accurate, precise, and helpful for code understanding. 2. You can leverage the original comments in the source code, but you cannot directly copy the original comments. TABLEIV 3. You need to write comments in one sentence. QUALITATIVECOMPARISONOFDEVELOPER-WRITTENCOMMENTS USER_PROMPT = '''Here is a source code function from {path} file in (DEFECTIVE)WITHMODEL-GENERATEDSUMMARIES. the {project} project: ```C/C++ {comment} DefectType1 Comparison2 {code} ``` Prompt Template /*UpuntilOpenSSL0.9.5a,thiswasnew section*/ ''' Lack Retrievesaconfigurationsectionfromthespecified configurationobject. Fig.2. Theworkflowofbuildingourcryptographicbinarydataset. 
/**Theoldinterfacetogetthesamething asSSL get ciphers()*/ Indirect TABLEII Retrievesthenameofthecipheratthespecifiedindexfrom STATISTICSOFOURCRYPTOGRAPHICBINARYDATASET. thecipherlistoftheSSLconnection. Dataset Project Volume(MB) Binaries Functions Functions-Uni2 /*-Somefunctionsallowforrepresentationoftheirreducible *polynomialsasanint[],sayp.Theirreduciblef(t)isthen OpenSSL1 1,302.02 192 787,535 659,112 *oftheform: BoringSSL 543.26 144 222,084 189,354 *tˆp[0]+tˆp[1]+...+tˆp[k] LibreSSL 885.08 144 499,192 416,304 Redundant *wherem=p[0]>p[1]>...>p[k]=0. TongSuo 538.11 96 319,701 273,134 /*Performsmodularreductionofaandstoreresultinr. MbedTLS1 232.79 288 106,430 95,155 rcouldbea.*/ Train LibTomCrypt 214.08 48 33,844 8,152 Performsmodularreductionofabinarypolynomialand Libbcrypt 3.19 48 706 663 storestheresultinr. Libgcrypt 208.22 48 61,904 55,161 Nettle 110.06 96 40,644 37,941 1DefectTypesofdeveloper-writtencomments:thelackoffunctionaldescrip- TEA 1.51 40 660 155 tions,theindirectinformationgiven,andtheredundantinformation. 2Theupperisdeveloper-written,andthelowerismodel-generated. Libsodium 81.81 48 32,460 20,456 wolfSSL 115.83 48 47,574 42,618 Test GmSSL 113.73 48 60,028 58,472 ness of generated summaries on the crucial cryptographic tiny-AES-c 1.24 48 572 523 semantics. Specifically, as shown in Table III, we have de- Overall 14 4,350.67 1,336 2,213,334 1,857,200 fined three categories and specific classes. We use whole- 1OpenSSLandMbedTLShavetwoversionsinthedataset. word matching to retrieve class-related keywords. For exam- 2Functions-Uni means the number of unique functions after deduplication ple, aes will be marked with ’aes’, ’rijndael’, and accordingtofunctionhash. ’advanced encryption standard’ etc. Only if the discriminator gets the same classes between the generation Worseyet,theirformatsareinconsistent,andthequalityisnot and the source code, will it pass the inspection. 
satisfactory.Weidentifythethreemostcommondefectsshown The results indicate that more than 85% of the generation in Table IV. To mitigate this problem, we leverage ChatGPT passed and they were retained in the end. We further evaluate [35], an advanced general LLM, to automatically generate the textual consistency between developer-written comments summaries as comments. Specifically, we use the metadata and model-generated summaries. The results show that they extracted from the source code for each function to build the have a 43.55% ROUGE-L score, which means a high degree prompt shown in Figure 2. We prompt the LLM to generate of consistency. And Table IV shows a few generated labels. only one-sentence summaries to try to avoid including too Overall, for each function in our dataset, we obtain its binary much information that is not present in the pseudo-code, such code, source code, and semantic labels, which highly align as variable names and macro definitions, which potentially with the facts. In this way, we addressed the challenges bias the model. On the other hand, a one-sentence summary presented in C3. is easier to read and understand. IV. OVERVIEW Keyword-based Discriminator. It is essential to assess whether these summaries align with the facts. Therefore, we Figure 3 illustrates our framework FoC, which consists of propose a keyword-based discriminator to judge the correct- two parts. We will briefly describe them below. 4V. DETAILEDDESIGN ①Binary Large Language Model A. Binary Large Language Model Formatted Summary: Initialization <COMMENT> Encrypts or decrypts
Frozen a given input using Golden LLM Encoder Decoder A <E /S C Oi Mn M EC NB TC > mode. Prompt:Pseudo Code < AEF SU _N cC bN cA _M eE n> c rypt Response: <Label> </FUNCNAME> Build Data Binary LLM Data Processing P Cse ou dd eo ACFG Features Extractor B Cin oa dr ey Multi-Head Attention Crypto Features Crypto Crypto Database Function Input Text Graph Conv MLP Network Function Embedding Vulnerability Vulnerability Database Semantic Encoder ②Binary Code Similarity Model Fig.3. AnoverviewofFoC. Build Binary Large Language Model. In this part, our goal is to train an LLM to tell us the behavior of cryptographic functions in stripped binaries. To this end, we employ three tasks and a frozen-decoder training strategy for training our Binary Large Language Model (FoC-BinLLM) efficiently. We adopt the Transformer model following the encoder- decoder architecture, which allows us to flexibly apply it in an encoder-only mode for semantic embedding generation, or in an encoder-decoder mode for causal generation. As shown in Figure 3 ①, we initialize our model with the pre-trained weightsfromagoldimplementation,whichallowsustoavoid heavy training from scratch. We train the base model on our dataset to specialize it for understanding binary code. Our FoC-BinLLM takes the pseudo-code of an unknown function as input and generates a formatted summary for participants, which allows for detailed semantics and mitigates C3. Build Binary Code Similarity Model. We further build a binarycodesimilaritymodel(FoC-Sim)tosearchforfunctions similar to an unknown function within the database. It is built uponourbinaryLLMandincorporatesmoreinformationfrom binary functions. FoC-Sim takes the pseudo-code, the attributed control-flow graph (ACFG),and thecryptographic featuresas input,which can be easily extracted from the binary function using a modern decompiler, such as IDA Pro [1]. The model then creates an embedding representation for the function. 
It is aware of any changes in the binary code, which compensates forthelackofsensitivityofourgenerativemodelandmitigates thechallengeC4.Additionally,wecanuseFoC-Simtoinspect binariestoidentifyvulnerablecryptographicimplementations. redocnE elbaniarT Feed Foward Initialize With Golden Causal Model redoceD nezorF Task3: Binary-Source Contrastive Learning Task1: Binary Code Summarization Task2: Function Name Prediction Feed Foward Multi-Head Attention Masked Multi- Head Attention Decoder Input Fig.4. AnoverviewofthetrainingofFoC-BinLLM. Golden Model Initialization. It is intuitively more efficient to train an expert model from a pre-trained model than from scratch, especially when discussing LLMs. As shown in Figure 4, we initialize our binary LLM base on a gold implementation,specificallyCodeT5+[36],arecentlyreleased LLMforcodeunderstandingandgenerationinthesource-code domain. CodeT5+ is initialized with weights from previous pre- trained LLMs (CodeGen-mono [37]) and is trained on two large-scale datasets: a multilingual dataset 2 and the Code- SearchNet [38]. Benefiting from the training on bimodal data consisting of both natural language (NL) and programming language(PL),CodeT5+achievedstate-of-the-artperformance in various downstream tasks, such as code generation and code summarization, at the time of its release. The pre-built understanding and generation capabilities of CodeT5+ for NL and PL are the cornerstone of training it to become an expert LLM. Multi-Task & Frozen-Decoder Training. We adopt multi- task training to build our cryptographic binary LLM. Figure 4 shows the three tasks: (Task1) Binary Code Summarization, (Task2)FunctionNamePrediction,and(Task3)Binary-Source Contrastive Learning. Both Task1 and Task2 are causal gen- eration tasks, which will be employed with encoder-decoder mode. Task3 optimizes only the semantic embeddings gener- ated by the encoder. 
As mentioned in Section III, we have collected as many summaries as possible for the binary functions in our dataset andonlykepttheonesthatpassthediscriminatorcheck.They provide more details than the function name, for example, Figure 5 (a) shows a function DES_cfb_encrypt(..., int enc) from project OpenSSL, and this function’s name 2https://huggingface.co/datasets/codeparrot/github-code 5Function: sub_126670 (DES_cfb_encrypt) __int64 __fastcall sub_0BA040(__int64 a1, void AES_cbc_encrypt(const unsigned char ------------------------------------------------------------ __int64 a2, __int64 a3, __int64 a4, *in,unsigned char *out,size_t len, Input: __int64 a5, int a6) { const AES_KEY* key, unsigned char *ivec, summarize in one sentence: __int64 result; // rax const int enc){ int *__fastcall sub_126670(__int64 a1, if ( a6 )result = sub a_ 41 ,B a6 5E ,1 s0 u( ba _1 0, Ba A2 6, Ea 03 ), ; if (enc k) ey,C R iY vP eT cO ,_ c (b bc l1 o2 c8 k_ 1e 2n 8c _r fy )p t A( Ei Sn _, e no cu rt y, pl te )n ;, __int64 a2, int a3, unsigned __int64 a4, elseresult = sub_1B6F50(a1,a2,a3,a4, elseCRYPTO_cbc128_decrypt(in, out, len, __int64 a5, int *a6, int a7) { a5,sub_0BAAA0); key, ivec, (block128_f) AES_decrypt); int *result; // rax return result; } } ... result = (int *)(unsigned int)(a3 - 1); if ( (unsigned int)result <= 0x3F ) { Semantic Encoder v9 = (a3 + 7) >> 3; ... } return result;
} Binary Code Embedding Source Code Embedding ### Response: ------------------------------------------------------------ Binary-Source Contrastive Learning Output: This function takes an input data block and encrypts or Fig.6. Anillustrationshowsthesemanticencoderusescontrastivelearning decrypts it using the DES algorithm in Cipher Feedback (CFB) mode. to shorten the distance between the source code embedding and the corre- spondingbinarycodeembedding. (a) BinaryCodeSummarization. TABLEV F --u -n -c -t --i -o -n -: - --s -u -b -_ --1 -2 -6 -6 --7 -0 - -( -D --E -S -_ -c --f -b -_ -e -n --c -r -y -p --t -) -------------- SUMMARYOFSTATISTICALFEATURESUSEDINFOCATBLOCK-LEVEL Input: ANDFUNCTION-LEVEL. recovery function name: int *__fastcall sub_126670(__int64 a1, __int64 a2, int a3, unsigned __int64 a4, Type Name DimensionFeatureType* Examples __int64 a5, int *a6, int a7) { int *result; // rax ... No.ofGeneralOpcode 1 G mov,push,cmp result = (int *)(unsigned int)(a3 - 1); No.ofArithmeticOpcode 1 C sub,add,mul if ( (unsigned int)result <= 0x3F ) { BasicBlockNo.ofLogicOpcode 1 C and,xor,ror v9 = (a3 + 7) >> 3; ... } return result; Level No.ofBranchOpcode 1 G beq,jmp,ret } BoWofGeneralOpcode 120 G - ### Response: ------------------------------------------------------------ BoWofArithmeticOpcode76 C - Output: No.ofBBLs 1 G - DES_cfb_encrypt No.ofEdges 1 G - Function (b) FunctionNamePrediction. Level No.ofCallees 1 G - No.ofUniqueCallees 1 G - BoWofKeywords 61 C aes,des,dsa Fig. 5. An illustration shows two generation tasks that we employ to train FoC-BinLLM. *This feature is categorized as either a general feature (G) or a cryptographic feature(C). alone cannot indicate that it performs encryption or decryp- tion operations. These straightforward semantic descriptions sourcecodeandbinarycodeinembeddingspace,whichassists includenoredundantinformationthatimpedesunderstanding. the base model in rapid domain adaptation. 
We use the cosine These advantages keep Task1 from being overly difficult to distance to measure the similarity between the embeddings. learn and biasing the model. As Equation 3 illustrates, the cosine-similarity loss is used to As shown in Figure 5 (b), we also train FoC-BinLLM to optimizethemodelparameters,wheretheV andV source binary reassign descriptive names for the functions whose names areembeddingsofthesamefunctioninsourcecodeandbinary are stripped for various reasons (e.g., copyright protection code, respectively: and size reduction). This means that for binary functions where summaries have not been correctly generated, FoC- LCL=(cid:13) (cid:13) (cid:13)1− Vsource·V binary (cid:13) (cid:13) (cid:13) (3) BinLLM can still learn their semantics in natural language. (cid:13) |Vsource|×|V binary|(cid:13) It is based on the insight from previous work [39], [36] A descriptive function name is usually a concise summary of that the decoder is used for complex causal-generation tasks the functionality and is helpful for program comprehension in and hence requires more careful training. Instead of training binaries. the entire large model, as shown in Figure 4, we freeze the Figure 5 shows two examples for Task1 and Task2, where decoder and set only the encoder and the cross-attention layer we build the input text with code and a prompt prefix, and to be trainable, which reduces a large number of trainable theoutputisthefunctionnameorsummary.Bothofthemare parameters for efficient training. auto-regressive generation task, which predicts the next token with the current token sequence. The loss function we used is B. Binary Code Similarity Model cross-entropy and can be formalized as: Code Semantic & Control Structure Encoding. We have |X| built FoC-BinLLM above which can capture the semantics of LGEN =−(cid:88) logP(xi|Xˆ 0:i−1) (2) binary code well. We here directly leverage its encoder to i=1 create the semantic encoder of FoC-Sim. 
In Figure 3 ②, the where X is the output sequence, P is the probability of encoder takes a pseudo-code lifted from the binary function predicting the i-th token x base on the part of label Xˆ . and generates an embedding as its semantic representation. i 0:i−1 We train our model to maximum P for each token in labels. Specifically, we use mean pooling on the hidden states in the Since the base model is trained on source code rather than last layer of the encoder. binary code, as shown in Figure 6, we employ Binary-Source Furthermore, we adopt a Graph Convolutional Network Contrastive Learning (Task3) to shorten the distance between (GCN) to capture the information of the function’s control 6structure, which is essential for the binary code similarity of N, denoted as B, containing only similar pairs, and these problem according to previous research [40]. As shown in sample pairs are not drawn from the same group pairwise: T sta ab til se ti3 c, alw fe eae tx ut rr ea sct toth ce rec ao tn etr to hl e-fl ao tw tribg ura tep dh C(C FF GG () Aa Cn Fd Gu )s .e Tth he e 1 (cid:88)N  esim(cid:16) Vi,V i+(cid:17) /τ  features we used in the basic block level are shown in Table Lsim=− N i=1 log
(cid:80) esim(cid:16) Vi,V j+(cid:17) /τ  (6) j∈B∧j̸=i V.The GCNmodel usesafeature encoderto generatefeature embeddingforeachnode,andthenapplymessagepropagation where V i and V i+ are the function embeddings of a pair of for aggregating the information of neighbors along the edges similar samples, τ is a temperature parameter, and sim is the in the ACFG. For each node v , its hidden state in l-th layer similarity function of embeddings. In addition, as shown in i is denoted as h(l) (for (i = 1,2,...,n)). In each layer of the Figure 3, the semantic encoder is the largest module in our i GCN, the message aggregation process can be described as similaritymodel.Wefreezeitsparameterstoenhancetraining follows: efficiency, as our evaluation found that it has already been well-trained in the first part.   h( il+1)=ReLU j∈(cid:88) N(vi)(cid:112) deg(vi)1 (cid:112) deg(vj) ·(cid:16) W(l)·h( jl)(cid:17)  (4) Inthissection,wecV oI n. duE cV tA eL xU teA nT siI vO eN experimentstoanswer the following research questions: where N(v ) and deg(v ) are the set of neighboring nodes i i and the degree of node v , the W(l) is the weights for the l-th RQ1: How well does FoC-BinLLM perform in summarizing i layer of the GCN, and ReLU denotes the activation function semantics in cryptographic stripped binaries? (Section VI-B) we used. RQ2:HowwelldoesFoC-Simperforminbinarycodesimilar- We employ a 5-layer GCN to aggregate information from ity detection, especially in the cryptography domain? (Section neighboring nodes, meaning that each node can potentially VI-C) access information from neighbors within five jumps. Finally, RQ3:DoesFoChavepracticalabilityinreal-worldscenarios? throughasummationreadoutoperation,thehiddenstatesofall (Section VI-D) nodes are aggregated to obtain a vector for the representation RQ4: Which part of our method contributes more in FoC? of the entire graph, namely the function structure embedding. 
(Section VI-E) CryptographicFeatures.Giventhatwefocusonthecryptog- A. Experiment Setup raphydomain,weidentifyasetoffeaturesusedtodistinguish Model & Training Setting. We initialize the weights of binaryfunctionsimplementedfromdifferentalgorithmsbetter. our binary LLM (FoC-BinLLM) with CodeT5p-220m[36]. All of the features we used are detailed in Table V. By default, FoC-BinLLM has a 12-layer encoder and a 12- Inspiredbypreviousworks[41],[16],[40],weusethenum- layer decoder, 768 hidden size, trainable parameters 38.11%, ber of arithmetic and logic opcode as features in basic block- 32100vocabsize,and1024inputlength.FoC-Simconsistsof level, as well as the BoW of frequent arithmetic opcodes. a semantic encoder initialized from FoC-BinLLM, a 5-layer Meanwhile,atthefunctionlevel,weemploythediscriminator GCN, and a 256-dimensional single-layer MLP. designed in Section III to identify keywords from the pseudo- During training the FoC-BinLLM, we use Adam optimizer code. We then create a BoW vector of the cryptographic class with 1e-4 learning rate, 0.1 weight decay rate, 64 batch size, corresponding to the keywords, which incorporates possible and 4 training epochs in total (1 for Task3 and 3 for Task1 & string and symbol information. As illustrated in Figure 3, Task2). While training the FoC-Sim, we use Adam optimizer all cryptographic features are fed into the final function with1e-3learningrate,1e-5weightdecayrate,128batchsize, embedding from both levels. and 110,000 training steps. Embedding Fusion & Model Training. As discussed above, Dataset. Using only the cryptographic dataset could result the semantic encoder generates semantic embeddings for in a biased model, therefore we used the same compilation pseudo-code, the GCN generates structural information em- environment to build general data from the GNU repositories beddings for ACFGs, and we handcraft the embeddings from 3 and added them to our training set. We prevented data statistical features. 
We use a single-layer MLP to fuse these leakage from code shared between projects by using MD5 embeddings, which can be formalized as: deduplication. Further, we remove textually similar data via MinHash[43](threshold4=0.95)topreventoverfitting.Finally, V func=MLP(concat(Encoder(pseudo-code), (5) we split 5% of the training data as the validation set. The GCN(ACFG), V )) manual statistics are shown in Table VI. To train FoC-Sim, the similar function pairs are sampled Environment. We used an Ubuntu 20.04 machine with a from our cryptographic binary dataset, where functions with 48-core Intel Xeon Gold 5220 CPU (2.0GHz, 42MB L3 the same function name in the same file from the same Cache),256GBRAM,and10*NVIDIARTX3090GPU.We project are treated as similar and vice versa. We employ the 3http://ftp.gnu.org/gnu MultipleNegativesRankingLoss [42] as the loss function. As 4https://github.com/bigcode-project/bigcode-dataset/blob/main/near illustrated in Equation 6, it processes mini-batch samples size deduplication/minhash deduplication.py 7TABLEVI TABLEVIII STATISTICSOFTHEFINALDATASETUSEDINTHEEVALUATION. COMPARISONWITHEXISTINGMETHODSONTHENUMBEROF CRYPTOGRAPHICPRIMITIVECLASSESIDENTIFIEDINBINARIES. Train Valid Test Project tiny-AES-c wolfSSL Libsodium GmSSL Overall Time(ms)1 MD5-Dedup 2,982,036 156,949 122,069 MiniHash-Dedup 2,388,677 125,719 122,069 FindCrypt2[31] 1 5 2 6 14 0.226
Signsrch[15] 1 7 3 10 21 0.129 findcrypt-yara[14] 1 7 2 7 17 0.081 TABLEVII Wherescrypto[26] 1 3 0 0 4 1209 COMPARISONWITHEXISTINGMETHODSONBINARYCODE FoC-BinLLM2 1(0) 12(1) 14(2) 19(2) 46(5) 0.228 SUMMARIZATION. 1Averagetimecostoneachbinaryfunction. 2Inparenthesesisthenumberoffalsepositivecryptographicprimitives. Method ROUGE-L BLEU-4 METEOR Time(s) BinT5[2] 0.1398 0.0132 0.0925 0.2697 TABLEIX HexT5*[3] 0.0927 0.0098 0.1057 1.1378 RESULTSOFBINARYCODESIMILARITYDETECTIONONTHEGENERAL Mixtral[51] 0.4006 0.1109 0.3283 9.0232 DATASET. ChatGPT[35] 0.3607 0.1356 0.3640 - FoC-BinLLM 0.4134 0.1447 0.4020 0.1533 AUC(one2one) XM(one2many) Method Description* *ThankstotheauthorsofHexT5forsharingtheirmodelwithus. XC XC+XB XA XM MRR@10Recall@1Recall@10 Zeek[52] S 0.84 0.85 0.840.84 0.28 0.13 0.56 Gemini[53] G+F 0.81 0.82 0.800.81 0.36 0.28 0.53 SAFE[9] S 0.80 0.81 0.800.81 0.29 0.16 0.46 employ BinKit [44] to build a cross-compiling environment Asm2Vec[54]S+G 0.77 0.69 0.600.65 0.12 0.07 0.18 to construct our binary dataset. We then use IDA Pro [1] GMN[55] G+F 0.85 0.86 0.860.86 0.53 0.45 0.58 to decompile binary functions from stripped binaries and use FoC-Sim S+G+F 0.99 0.98 0.970.99 0.83 0.78 0.95 srcML to extract metadata from source code. As for model *CodeSemantics(S),GraphStructure(G),FeatureEngineering(F). training, we use Python language with PyTorch [45] and transformers[46]toimplementourmodels,andacceleratethe are based on cryptographic constant values and signatures. training with DeepSpeed [47] in ZeRO2. Wherescrypto offers only executable for 32-bit binary and supports four cryptographic algorithms (i.e., AES, SHA1, B. Cryptographic Binary Code Summarization MD5, and XTEA). Other methods mentioned in Section II-C Metrics. We employ three text consistency metrics to show cannot be reproduced due to various reasons, such as not yet comprehensiveassessment:ROUGE-L[48]isarecall-oriented being open-sourced or dependencies being inaccessible. 
metric, BLEU-4 [49] emphasizes precision, and METEOR We conduct an experiment using four binary in x86_64 [50] is more balanced and considers synonyms. Time is the from our test set. The results in Table VIII present the num- average analysis time for each function. ber of primitive classes correctly identified by each method. The results are shown in Table VII. FoC-BinLLM shows FoC-BinLLM has successfully identified a greater number impressive performance on the test set. Specifically, we have compared to other methods but has more misprediction. The 41.34%, 14.47%, and 40.20% scores on ROUGE-L, BLEU- other methods have no false positives, benefiting from their 4, and METEOR, respectively. We even outperform Mix- design.Overall,ourFoC-BinLLMcansummarizethebehavior tral (Mixtral-8x7B-Instruct-v0.15) and ChatGPT (chatgpt-3.5- of cryptography-related functions in stripped binaries and turbo-11066), which are generally much larger than FoC- achievessuperiorperformancecomparedtopreviousmethods. BinLLM. Both BinT5 and HexT5 exhibit significant perfor- C. Binary Code Similarity Detection mance degradation compared to results reported by them, Metrics. We evaluate FoC-Sim and existing BCSD methods whichcouldbeattributedtoourcryptographicbinarysurpass- in one2one and one2many search scenarios. Following the ingthedomainwheretheycollecttheirdataset.Fortimeover- previous study [40], we use Area Under ROC (AUC) for head, our approach also has advantages due to its relatively one2onesearch,MeanReciprocalRank(MRR@K)andRecall small size. ChatGPT is accessed through API, which depends (Recall@K) at different K thresholds for one2many search. on the network situation, so its time was not evaluated. Firstly, we conduct experiments on a benchmark dataset Cryptographic Algorithm Identification. In analysis tasks, released by previous work [40], which contains 7 projects, such as detecting weak cryptographic algorithms, the analyst two of which are used as the test set. 
We here use its original needs to know which primitives are used in binaries. There- experimentsetup.Inparticular,fortheone2onesearch,thetest fore,wefurtherperformcomparisonexperimentsonthistask. set consists of 50k positive pairs and 50k negative pairs. For Weuseourkeyword-baseddiscriminatortoidentifyprimitives the one2many search, 1,400 positive pairs and 140k negative from FoC-BinLLM prediction. pairs are selected, i.e., finding one positive function in 101 We select the popular tools and Wherescrypto[26] as the functions. baseline methods. FindCrypt2, findcrypt-yara, and Signsrch As shown in Table IX, FoC-Sim presents excellent per- 5https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1 formance on the general datasets. Specifically, we achieves 6https://platform.openai.com/docs/models a 78% Recall@1 that exceeds the baseline method GMN [55] 8label: mbedtls_x509_crt_init label: mbedtls_x509_crt_parse
main Pseudo Code: __int64 __fastcall sub_2DCC0( _QWORD *a1) { Pseudo Code: __int64 __fastcall sub_2BD10(void *ptr, void ...... *src, size_t n) { --------------------------------------------------------- ...... ReName: ssl_init ------------------------------------------------------------ ...... Summary: Initialize the SSL context. ReName: pem_read_cert Summary: Reads a PEM-encoded certificate from a file. label: mbedtls_ssl_write label: mbedtls_pk_parse_key Pseudo Code: __int64 __fastcall sub_1C980(__int64 a1, __int64 Pseudo Code: __int64 __fastcall sub_48380(__int64 a1, __int64 a2, a2, unsigned __int64 a3, __int64 a4, __int64 a5, int a6){...... __int64 a3, __int64 a4, __int64 a5) {...... -------------------------------------------------------------- ---------------------------------------------------------------------- ReName: decode_private_key ReName: ssl_check_ctr_renegotiate Summary: Decode a private key from a buffer and return the type Summary: Check if the TLS handshake is valid and renegotiate. of the decoded key. Fig.7. AnexampleofanalysisbinarycryptographicfunctioninaviruswithFoC. TABLEX TABLEXI RESULTSOFBINARYCODESIMILARITYDETECTIONONTHE RESULTSOFVULNERABLECRYPTOGRAPHICFUNCTIONSDETECTIONIN CRYPTOGRAPHICDATASET. REAL-WORLDFIRMWARE. AUC(one2one) XM(one2many) VulnDetect Vuln&Patch Method Description* Library CVE-ID XO XC XA XM MRR@10Recall@1Recall@10 NETGEARTP-LINK Xiaomi NETGEARTP-LINK Xiaomi PalmTree[11]S+G 0.9020.871 - 0.865 0.465 0.391 0.639 CVE-2015-0286 – 3/3 22/22 – 3/3 22/22 CVE-2015-0289 – 10/12 82/88 – 7/12 75/88 Trex[12] S 0.9050.9050.8310.785 0.302 0.211 0.580 CVE-2015-1790 – 3/3 22/22 – 2/3 15/22 jTrans[13] S 0.9290.9230.8450.841 0.463 0.380 0.668 CVE-2016-0797 – 20/20 44/44 – 20/20 44/44 FoC-Sim S+G+F 0.9960.9960.9980.994 0.940 0.910 0.990 CVE-2016-2105 – 10/10 22/22 – 10/10 22/22 OpenSSLCVE-2016-2180 6/6 10/10 22/22 6/6 10/10 22/22 *CodeSemantics(S),GraphStructure(G),FeatureEngineering(F). 
CVE-2017-3731 8/8 – – 4/8 – – CVE-2019-1547 16/16 8/8 15/15 11/16 6/8 15/15 CVE-2020-1971 19/19 20/20 15/15 13/19 18/20 9/15 CVE-2021-23841 20/20 22/22 15/15 20/20 22/22 15/15 by 33%. GMN utilizes CFG and BoW of Opcode from a CVE-2022-0778 8/8 6/6 5/5 7/8 6/6 5/5 binary function, which is a subset of the information fused CVE-2021-36475 3/3 13/16 14/15 3/3 16/16 15/15 CVE-2021-36476 3/3 14/14 12/12 0/3 4/14 6/12 by our similarity model. Besides that, FoC-Sim benefits from mbedTLS CVE-2021-36647 4/4 13/16 14/15 4/4 13/16 15/15 CVE-2021-43666 4/4 16/16 15/15 0/4 0/16 6/15 semantic information provided by our binary LLM, which LibgcryptCVE-2021-40528 0/6 0/5 0/3 2/6 5/5 3/3 could be an essential factor for the advantage. Total #16 91/97 168/181319/330 70/97 142/181289/330 To further evaluate, we fine-tune and evaluate the state-of- Theformeristhenumberofvulnerabilitiesdiscoveredandthelatteristhetotalnumberof the-art BCSD methods on our cryptographic dataset. Specifi- potentialvulnerabilities. cally, we employ PalmTree [11], Trex [12], and jTrans [13]. We use the same settings as mentioned above. The results are shown in Table X. FoC-Sim significantly However, understanding the binary code within its executable outperformstheothermethodsonallmetrics.Specifically,our file is challenging for defenders due to the absence of symbol methodachieves91%Recall@1ontheXMtaskofone2many information. search, while for one-to-one searches we achieve more than WeshowapartofouranalysisinFigure7,whereeachbox 99% AUC for all sub-tasks. Notably, all of the previous represents a binary function in the virus, and we manually methods generate semantic embedding from assembly code, obtained the corresponding function name from the source which is sensitive to the compilation environment, especially code and judged how well our model predictions agreed with in the XA task. 
Instead, FoC-Sim can benefit from the cross- thefacts.Westartouranalysisfromtheentrypoint,themain architecture capabilities provided by the pseudo-code. The function, and analyze the callee functions within. Initially, control structure information and cryptographic features we we encounter a series of context initialization functions, such identified also boost our model. as the function sub_2DCC0. FoC-BinLLM conducted auto- matedanalysisandpredictedafunctionnamessl_initand D. Practical Ability a summary, describing its functionality correctly. In this section, we explore the ability of FoC in two real-
Then, functions are called one after another. And FoC- world scenarios: (1) analyzing cryptographic functions in a BinLLM provides us with comprehensible descriptions virus, and (2) retrieving vulnerable cryptographic implemen- in NL. It is noteworthy that the predicted name for tations in firmware. mbedtls_ssl_write does not match the original name but Cryptographic Function Analysis in Virus. We employ FoC- mentions the behavior of handshake verification. These reveal BinLLM to analyze an open-source Linux Remote Access the potential of FoC-BinLLM in automated malware analysis. Trojan (RAT) sample named splinter 7. Since the source code Vulnerability Detection. To further explore our similarity is available, we know that it utilizes the cryptographic library model, FoC-Sim, we utilize it to detect vulnerable crypto- MbedTLS to implement the encrypted communication module. graphic functions. We first build a vulnerability database with 7https://github.com/tuian/splinter vulnerable & patched functions related to 16 CVEs. We then
vendors to build a firmware database. We determine the existenceofvulnerablefunctionsbasedontheversionnumber ofthelibraryfile.Weperformtwosearchtasks.(1)Wesearch VII. DISCUSSION for vulnerable functions in the firmware database according Inthissection,wediscussthelimitationsofourmethodand to the vulnerability database. It is considered a successful potential ways for future research. identification if the vulnerable functions are found among the Quality of Summaries. LLM-generated summaries are an top10mostsimilarinallfunctionsfromasuspiciousfile.The NL translation of source code (including developer-written resultsareshowninthe“VulnDetect”columninTableXI.(2) comments), their quality largely depends on the LLMs used. We evaluate the ability of the model to distinguish between We can use the discriminator to check the crucial semantics vulnerable functions and patched functions. Specifically, a within the cryptography domain. However, in the general vulnerable function from the firmware database is considered domain, how to create better semantic labels for binary code to be successfully distinguished if it has a higher similarity is a worthwhile research direction. to the vulnerable version rather than the patched version. The Primitive Classes of Discriminator. We have investigated results are shown in the “Vuln & Patch” column in Table XI. popular open-source repositories, collected a number of the The results show that FoC-Sim can accurately detect the most common primitives from them, and included keywords majority of vulnerable functions and can correctly distinguish associated with them. However, we must recognize that omis- vulnerable functions from patched functions. It demonstrates sions exist. Systematically researching and collecting these that FoC-Sim has the ability to overcome C4. However, we cryptographicprimitivesisalsovaluableworkthatcanhelpus observed a failed case in Libgcrypt. 
With manual inspection, understandwhatcryptographicalgorithmsarecurrentlysecure wefoundthatthereisahugedifferencebetweenthevulnerable and what scenarios they are suitable for. functions from the firmware database and the vulnerability Obfuscated Binaries. In this paper, we have not considered database in both text and structure. obfuscated binaries. Aligot [24] and CryptoHunt [25] design methodsforcryptographicalgorithmdetectionbasedoninput- E. Ablation Study output relationships of loop structures in obfuscated binaries. Parameters & Training-Task of Binary LLM. We further However, their methods are limited by manually involved train a scaled FoC-BinLLM with 2B parameters on the same work.EnhancingtherobustnessofFoCtoobfuscatedbinaries dataset,usingthesametrainingtasksandstrategies.Itslightly is our future work. outperforms the default one on binary code summarization (43.96% v.s. 41.34% on Rouge-L). However, Figure 8 (a) VIII. CONCLUSION shows that the 2B model has a faster convergence speed. We leverage contrastive learning (Task3) described in Sec- In this paper, our work addressed the challenges that ex- tionV-Atofacilitaterapidadaptationofthebasemodeltothe isting works did not and provided a public dataset for future binary domain. Figure 8 (b) indicates that the model trained research on the current issue. We present FoC, a novel LLM- with Task3 shows lower training loss at the same steps. based framework for the analysis of cryptographic functions Contributions to Similarity Model. We have built FoC- in stripped binaries. Our evaluation results show that FoC-
Sim for generating function embeddings, which incorporates BinLLM can summarize function semantics in natural lan- various information from binary functions, including code guage, and outperforms ChatGPT by 14.61% on ROUGE- semantics, control structures, and cryptographic features. We L score. On the other hand, FoC-Sim achieves 52% higher further explore their contributions here. Recall@1 than previous methods on the cryptographic dataset We compare the performance of our FoC-Sim on the cryp- for the BCSD task, which compensates for the intrinsic tographic dataset by ablating each of the three information weakness of the prediction of our generative models. The sources. As shown in Table XII, we observe that the absence two components of FoC have shown practical ability in virus ofanyofthethreesourcesresultsinperformancedegradation. analysis and 1-day vulnerability detection. 10REFERENCES Intrusion Detection, R. Sommer, D. Balzarotti, and G. Maier, Eds. Berlin,Heidelberg:SpringerBerlinHeidelberg,2011,pp.41–60. [1] Hex-RaysSA,“IDAPro,”https://www.hex-rays.com/products/ida,2023. [18] L. Benedetti, A. Thierry, and J. Francq, “Detection of cryptographic [2] A. Al-Kaswan, T. Ahmed, M. Izadi, A. A. Sawant, P. Devanbu, and algorithms with grap,” Cryptology ePrint Archive, Paper 2017/1119, A.vanDeursen,“Extendingsourcecodepre-trainedlanguagemodelsto 2017, https://eprint.iacr.org/2017/1119. [Online]. Available: https:// summarisedecompiledbinarie,”in2023IEEEInternationalConference eprint.iacr.org/2017/1119 on Software Analysis, Evolution and Reengineering (SANER). IEEE, [19] J. Li, Z. Lin, J. Caballero, Y. Zhang, and D. Gu, “K-hunt: 2023,pp.260–271. Pinpointing insecure cryptographic keys from execution traces,” in [3] J.Xiong,G.Chen,K.Chen,H.Gao,S.Cheng,andW.Zhang,“Hext5: Proceedings of the 2018 ACM SIGSAC Conference on Computer Unifiedpre-trainingforstrippedbinarycodeinformationinference,”in and Communications Security, ser. CCS ’18. 
New York, NY, USA: 202338thIEEE/ACMInternationalConferenceonAutomatedSoftware Association for Computing Machinery, 2018, p. 412–425. [Online]. Engineering(ASE). IEEE,2023,pp.774–786. Available:https://doi.org/10.1145/3243734.3243783 [4] M.Chen,J.Tworek,H.Jun,Q.Yuan,H.P.deOliveiraPinto,J.Kaplan, [20] P. Kochberger and F. Seitl, “Detecting cryptography through ir visu- H. Edwards, Y. Burda, N. Joseph, G. Brockman, A. Ray, R. Puri, alization,” in 2018 International Conference on Software Security and G. Krueger, M. Petrov, H. Khlaaf, G. Sastry, P. Mishkin, B. Chan, Assurance(ICSSA),2018,pp.25–29. S. Gray, N. Ryder, M. Pavlov, A. Power, L. Kaiser, M. Bavarian, [21] R. Zhao, D. Gu, J. Li, and Y. Zhang, “Automatic detection and C.Winter,P.Tillet,F.P.Such,D.Cummings,M.Plappert,F.Chantzis, analysis of encrypted messages in malware,” in Information Security E.Barnes,A.Herbert-Voss,W.H.Guss,A.Nichol,A.Paino,N.Tezak, and Cryptology, D. Lin, S. Xu, and M. Yung, Eds. Cham: Springer J. Tang, I. Babuschkin, S. Balaji, S. Jain, W. Saunders, C. Hesse, InternationalPublishing,2014,pp.101–117. A. N. Carr, J. Leike, J. Achiam, V. Misra, E. Morikawa, A. Radford, [22] J.Li,L.Jiang,andH.Shu,“Binarycodelevelcyclicfeaturerecognition M.Knight,M.Brundage,M.Murati,K.Mayer,P.Welinder,B.McGrew, ofcryptographicalgorithm,”Computerengineeringanddesign,vol.35, D.Amodei,S.McCandlish,I.Sutskever,andW.Zaremba,“Evaluating no.8,pp.2628–2632,2014. largelanguagemodelstrainedoncode,”2021. [23] P.Lestringant,F.Guihe´ry,andP.-A.Fouque,“Automatedidentification [5] B. Wang and A. Komatsuzaki, “GPT-J-6B: A 6 Billion Param- of cryptographic primitives in binary code with data flow graph eter Autoregressive Language Model,” https://github.com/kingoflolz/ isomorphism,” in Proceedings of the 10th ACM Symposium on mesh-transformer-jax,May2021. Information, Computer and Communications Security, ser. ASIA CCS [6] S. Black, S. Biderman, E. Hallahan, Q. Anthony, L. Gao, L. Golding, ’15. 
New York, NY, USA: Association for Computing Machinery, H. He, C. Leahy, K. McDonell, J. Phang, M. Pieler, U. S. Prashanth, 2015,p.203–214.[Online].Available:https://doi.org/10.1145/2714576. S. Purohit, L. Reynolds, J. Tow, B. Wang, and S. Weinbach, 2714639 “GPT-NeoX-20B: An open-source autoregressive language model,” in [24] J. Calvet, J. M. Fernandez, and J.-Y. Marion, “Aligot: Cryptographic Proceedings of BigScience Episode #5 – Workshop on Challenges & function identification in obfuscated binary programs,” in Proceedings Perspectives in Creating Large Language Models. virtual+Dublin: of the 2012 ACM Conference on Computer and Communications Association for Computational Linguistics, May 2022, pp. 95–136. Security, ser. CCS ’12. New York, NY, USA: Association for [Online].Available:https://aclanthology.org/2022.bigscience-1.9 Computing Machinery, 2012, p. 169–182. [Online]. Available: https:
[7] F. F. Xu, U. Alon, G. Neubig, and V. J. Hellendoorn, “A systematic //doi.org/10.1145/2382196.2382217 evaluation of large language models of code,” in Proceedings [25] D. Xu, J. Ming, and D. Wu, “Cryptographic function detection in of the 6th ACM SIGPLAN International Symposium on Machine obfuscated binaries via bit-precise symbolic loop mapping,” in 2017 Programming, ser. MAPS 2022. New York, NY, USA: Association IEEESymposiumonSecurityandPrivacy(SP),2017,pp.921–937. for Computing Machinery, 2022, p. 1–10. [Online]. Available: [26] C.Meijer,V.Moonsamy,andJ.Wetzels,“Where’scrypto?:Automated https://doi.org/10.1145/3520312.3534862 identification and classification of proprietary cryptographic primitives [8] L. Luo, J. Ming, D. Wu, P. Liu, and S. Zhu, “Semantics-based in binary code,” in 30th USENIX Security Symposium, USENIX obfuscation-resilientbinarycodesimilaritycomparisonwithapplications Security 2021, August 11-13, 2021, M. Bailey and R. Greenstadt, tosoftwareandalgorithmplagiarismdetection,”IEEETransactionson Eds. USENIX Association, 2021, pp. 555–572. [Online]. Available: SoftwareEngineering,vol.43,no.12,pp.1157–1177,2017. https://www.usenix.org/conference/usenixsecurity21/presentation/meijer [9] L.Massarelli,G.A.DiLuna,F.Petroni,R.Baldoni,andL.Querzoni, [27] “CVE-2014-0160.”AvailablefromMITRE,CVE-IDCVE-2014-0160., “Safe: Self-attentive function embeddings for binary similarity,” in Dec. 3 2013. [Online]. Available: http://cve.mitre.org/cgi-bin/cvename. Detection of Intrusions and Malware, and Vulnerability Assessment, cgi?name=CVE-2014-0160 R.Perdisci,C.Maurice,G.Giacinto,andM.Almgren,Eds.,2019,pp. 309–329. [28] Y. Wang, W. Wang, S. Joty, and S. C. 
Hoi, “CodeT5: Identifier-aware unified pre-trained encoder-decoder models for code understanding [10] Y.Duan,X.Li,J.Wang,andH.Yin,“Deepbindiff:Learningprogram- widecoderepresentationsforbinarydiffing,”inProceedingsofthe2020 and generation,” in Proceedings of the 2021 Conference on NetworkandDistributedSystemsSecuritySymposium(NDSS),2020. Empirical Methods in Natural Language Processing, M.-F. Moens, X. Huang, L. Specia, and S. W.-t. Yih, Eds. Online and [11] X. Li, Q. Yu, and H. Yin, “Palmtree: Learning an assembly language Punta Cana, Dominican Republic: Association for Computational model for instruction embedding,” Proceedings of the 2021 ACM Linguistics, Nov. 2021, pp. 8696–8708. [Online]. Available: https: SIGSACConferenceonComputerandCommunicationsSecurity,2021. //aclanthology.org/2021.emnlp-main.685 [12] K.Pei,Z.Xuan,J.Yang,S.Jana,andB.Ray,“Learningapproximate execution semantics from traces for binary function similarity,” IEEE [29] G. Hill and X. Bellekens, “Cryptoknight: Generating and modelling TransactionsonSoftwareEngineering,vol.49,no.04,pp.2776–2790, compiled cryptographic primitives,” Information, vol. 9, no. 9, 2018. apr2023. [Online].Available:https://www.mdpi.com/2078-2489/9/9/231 [13] H. Wang, W. Qu, G. Katz, W. Zhu, Z. Gao, H. Qiu, J. Zhuge, and [30] X. Li, Y. Chang, G. Ye, X. Gong, and Z. Tang, “Genda: A graph C. Zhang, “jtrans: jump-aware transformer for binary code similarity embedded network based detection approach on encryption algorithm detection,” in Proceedings of the 31st ACM SIGSOFT International of binary program,” J. Inf. Secur. Appl., vol. 65, no. C, mar 2022. SymposiumonSoftwareTestingandAnalysis,2022,pp.1–13. [Online].Available:https://doi.org/10.1016/j.jisa.2021.103088 [14] polymorf, “findcrypt-yara,” https://github.com/polymorf/findcrypt-yara, [31] I.Guilfanov,“Findcrypt2,”https://hex-rays.com/blog/findcrypt2/,2006. 2022. [32] C. Zhao, F. Kang, J. Yang, and H. 
Shu, “A review of cryptographic [15] Sirmabus, “Ida signsrch,” https://github.com/nihilus/IDA Signsrch, algorithmrecognitiontechnologyforbinarycode,”JournalofPhysics: 2015. Conference Series, vol. 1856, no. 1, p. 012015, apr 2021. [Online]. [16] Z. Wang, X. Jiang, W. Cui, X. Wang, and M. Grace, “Reformat: Available:https://dx.doi.org/10.1088/1742-6596/1856/1/012015 Automatic reverse engineering of encrypted messages,” in Computer [33] OpenSSL,“Openssl,”https://github.com/openssl/openssl,2023. Security–ESORICS 2009: 14th European Symposium on Research in [34] X. Jin, J. Larson, W. Yang, and Z. Lin, “Binary code summarization: Computer Security, Saint-Malo, France, September 21-23, 2009. Pro- Benchmarkingchatgpt/gpt-4andotherlargelanguagemodels,”2023. ceedings14. Springer,2009,pp.200–215. [35] L.Ouyang,J.Wu,X.Jiang,D.Almeida,C.L.Wainwright,P.Mishkin, [17] F. Gro¨bert, C. Willems, and T. Holz, “Automated Identification of C. Zhang, S. Agarwal, K. Slama, A. Ray, J. Schulman, J. Hilton, Cryptographic Primitives in Binary Programs,” in Recent Advances in F. Kelton, L. Miller, M. Simens, A. Askell, P. Welinder, P. Christiano,
11J.Leike,andR.Lowe,“Traininglanguagemodelstofollowinstructions New York, NY, USA: Association for Computing Machinery, 2017, p. withhumanfeedback,”2022. 363–376.[Online].Available:https://doi.org/10.1145/3133956.3134018 [36] Y.Wang,H.Le,A.D.Gotmare,N.D.Q.Bui,J.Li,andS.C.H.Hoi, [54] S. H. H. Ding, B. C. M. Fung, and P. Charland, “Asm2vec: Boosting “Codet5+:Opencodelargelanguagemodelsforcodeunderstandingand static representation robustness for binary clone search against code generation,”2023. obfuscation and compiler optimization,” in 2019 IEEE Symposium on [37] E.Nijkamp,B.Pang,H.Hayashi,L.Tu,H.Wang,Y.Zhou,S.Savarese, SecurityandPrivacy(SP),2019,pp.472–489. andC.Xiong,“Codegen:Anopenlargelanguagemodelforcodewith [55] Y. Li, C. Gu, T. Dullien, O. Vinyals, and P. Kohli, “Graph matching multi-turnprogramsynthesis,”ICLR,2023. networks for learning the similarity of graph structured objects,” in [38] H. Husain, H. Wu, T. Gazit, M. Allamanis, and M. Brockschmidt, Internationalconferenceonmachinelearning. PMLR,2019,pp.3835– “Codesearchnet challenge: Evaluating the state of semantic code 3845. search,” CoRR, vol. abs/1909.09436, 2019. [Online]. Available: http://arxiv.org/abs/1909.09436 [39] Y. Li, D. Choi, J. Chung, N. Kushman, J. Schrittwieser, R. Leblond, T. Eccles, J. Keeling, F. Gimeno, A. Dal Lago et al., “Competition- levelcodegenerationwithalphacode,”Science,vol.378,no.6624,pp. 1092–1097,2022. [40] A.Marcelli,M.Graziano,X.Ugarte-Pedrero,Y.Fratantonio,M.Man- souri, and D. Balzarotti, “How machine learning is solving the binary function similarity problem,” in 31st USENIX Security Symposium (USENIXSecurity22),2022,pp.2099–2116. [41] J. Caballero, P. Poosankam, C. Kreibich, and D. Song, “Dispatcher: Enabling active botnet infiltration using automatic protocol reverse- engineering,”inProceedingsofthe16thACMconferenceonComputer andcommunicationssecurity,2009,pp.621–634. [42] M.Henderson,R.Al-Rfou,B.Strope,Y.-H.Sung,L.Luka´cs,R.Guo, S. Kumar, B. Miklos, and R. 
Kurzweil, “Efficient natural language responsesuggestionforsmartreply,”arXivpreprintarXiv:1705.00652, 2017. [43] A.Z.Broder,“Ontheresemblanceandcontainmentofdocuments,”in Proceedings.CompressionandComplexityofSEQUENCES1997(Cat. No.97TB100171). IEEE,1997,pp.21–29. [44] D.Kim,E.Kim,S.K.Cha,S.Son,andY.Kim,“Revisitingbinarycode similarity analysis using interpretable feature engineering and lessons learned,”IEEETransactionsonSoftwareEngineering,pp.1–23,2022. [45] Facebook,Inc.,“PyTorch,”https://pytorch.org,2023. [46] T. Wolf, L. Debut, V. Sanh, J. Chaumond, C. Delangue, A. Moi, P. Cistac, T. Rault, R. Louf, M. Funtowicz, J. Davison, S. Shleifer, P.vonPlaten,C.Ma,Y.Jernite,J.Plu,C.Xu,T.L.Scao,S.Gugger, M. Drame, Q. Lhoest, and A. M. Rush, “Transformers: State-of- the-art natural language processing,” in Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations. Online: Association for Computational Linguistics, Oct. 2020, pp. 38–45. [Online]. Available: https: //www.aclweb.org/anthology/2020.emnlp-demos.6 [47] microsoft, “Deepspeed,” https://github.com/microsoft/DeepSpeed/, 2023. [48] C.-Y.Lin,“ROUGE:Apackageforautomaticevaluationofsummaries,” inTextSummarizationBranchesOut. Barcelona,Spain:Associationfor Computational Linguistics, Jul. 2004, pp. 74–81. [Online]. Available: https://www.aclweb.org/anthology/W04-1013 [49] K.Papineni,S.Roukos,T.Ward,andW.jingZhu,“Bleu:amethodfor automaticevaluationofmachinetranslation,”2002,pp.311–318. [50] S. Banerjee and A. Lavie, “METEOR: An automatic metric for MT evaluation with improved correlation with human judgments,” in Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization, J. Goldstein, A. Lavie, C.-Y. Lin, and C. Voss, Eds. Ann Arbor, Michigan: Association for Computational Linguistics, Jun. 2005, pp. 
65–72.[Online].Available:https://aclanthology.org/W05-0909 [51] A.Q.Jiang,A.Sablayrolles,A.Roux,A.Mensch,B.Savary,C.Bam- ford, D. S. Chaplot, D. de las Casas, E. B. Hanna, F. Bressand, G. Lengyel, G. Bour, G. Lample, L. R. Lavaud, L. Saulnier, M.-A. Lachaux, P. Stock, S. Subramanian, S. Yang, S. Antoniak, T. L. Scao, T.Gervet,T.Lavril,T.Wang,T.Lacroix,andW.E.Sayed,“Mixtralof experts,”2024. [52] N. Shalev and N. Partush, “Binary similarity detection using machine learning,” in Proceedings of the 13th Workshop on Programming LanguagesandAnalysisforSecurity,ser.PLAS’18. NewYork,NY, USA:AssociationforComputingMachinery,2018,p.42–47.[Online].
Available:https://doi.org/10.1145/3264820.3264821 [53] X. Xu, C. Liu, Q. Feng, H. Yin, L. Song, and D. Song, “Neural network-based graph embedding for cross-platform binary code similarity detection,” in Proceedings of the 2017 ACM SIGSAC ConferenceonComputerandCommunicationsSecurity,ser.CCS’17. 12
2403.18624 Vulnerability Detection with Code Language Models: How Far Are We? Yangruibo Ding†, Yanjun Fu♢, Omniyyah Ibrahim♮, Chawin Sitawarin‡, Xinyun Chen∇ Basel Alomair ♮,∗, David Wagner ‡, Baishakhi Ray†, Yizheng Chen♢ †Columbia University ∗University of Washington ♮King Abdulaziz City for Science and Technology ∇Google DeepMind ‡UC Berkeley ♢University of Maryland Abstract—Inthecontextoftherisinginterestincodelanguage In this paper, we aim to evaluate whether code LMs are models (code LMs) and vulnerability detection, we study the able to detect security vulnerabilities in real code, in settings effectiveness of code LMs for detecting vulnerabilities. Our representative of that needed for real-world use. This explo- analysisrevealssignificantshortcomingsinexistingvulnerability ration is anchored in the belief that while code LMs possess datasets,includingpoordataquality,lowlabelaccuracy,andhigh duplication rates, leading to unreliable model performance in remarkable potential, realizing their full capability to enhance realistic vulnerability detection scenarios. Additionally, the eval- software security necessitates a rigorous validation of their uation methods used with these datasets are not representative training and evaluation frameworks against the challenges of of real-world vulnerability detection. real-world software development. In particular, we scrutinize To address these challenges, we introduce PRIMEVUL, a new the datasets used to train code LMs and the benchmarks and dataset for training and evaluating code LMs for vulnerability detection. PRIMEVUL incorporates a novel set of data labeling metrics used to evaluate their effectiveness at vulnerability techniques that achieve comparable label accuracy to human- detection. verifiedbenchmarkswhilesignificantlyexpandingthedataset.It alsoimplementsarigorousdatade-duplicationandchronological Limitation of existing datasets and benchmarks. 
First, data splitting strategy to mitigate data leakage issues, alongside we meticulously analyze existing VD benchmarks [5, 8–11], introducing more realistic evaluation metrics and settings. This examining data collection methods, label accuracy, and preva- comprehensiveapproachaimstoprovideamoreaccurateassess- lenceofdataduplication.Ourinvestigationrevealscriticaldata ment of code LMs’ performance in real-world conditions. Evaluating code LMs on PRIMEVUL reveals that existing quality problems, that impact their effectiveness for training benchmarks significantly overestimate the performance of these and their suitability for evaluating code LMs. models. For instance, a state-of-the-art 7B model scored 68.26% Noisy labels: In VD literature, researchers typically la- F1 on BigVul but only 3.09% F1 on PRIMEVUL. Attempts to improveperformancethroughadvancedtrainingtechniquesand bel datasets either automatically or manually. Most large larger models like GPT-3.5 and GPT-4 were unsuccessful, with datasets [4, 5, 9] use automatic labeling because manual results akin to random guessing in the most stringent settings. labeling is too expensive. However, automatic labeling can These findings underscore the considerable gap between current introduce significant label noise. For instance, datasets like capabilities and the practical requirements for deploying code BigVul [9] curate hundreds of thousands of functions from LMs in security roles, highlighting the need for more innovative research in this domain. the real world and rely on vulnerability-fixing commits for labeling. However, they suffer from a flawed assumption I. INTRODUCTION that each function modified by such a commit corresponds In the evolving landscape of software development, code to a (separate) vulnerability. 
In practice, vulnerability-fixing language models (code LMs) have become pivotal in au- commits often fix one vulnerability but also make other tomating various software engineering tasks, fundamentally changes to surrounding code, and existing automatic labeling altering developers’ coding approaches [1]. Leading examples methods wrongly label that surrounding code as vulnerable. like GitHub Copilot [2] and Amazon CodeWhisperer [3] In contrast, manual labeling offers higher accuracy, but its have already integrated into real-world software development, cost means it can only be applied to smaller datasets. For significantly assisting developers in their daily work. Con- instance, the most accurate prior dataset, SVEN [12], which sequently, LM-based vulnerability detection (VD) has gained was manually labeled, covers only 9 Common Weakness traction, with researchers utilizing code LMs’ expanding ca- Enumerations(CWEs)andcomprisesonly1.6ksamples.This pabilities to autonomously identify security vulnerabilities in dichotomy presents a challenge: how to acquire high-quality codebases [4–7]. labeled VD data at scale for training code LMs to detect 1 4202 luJ 01 ]ES.sc[ 2v42681.3042:viXrasecurity vulnerabilities. of both vulnerabilities and coding patterns. Chronological Dataduplication: Furthermore,dataduplicationispreva- splitting also reduces the risks of data leakage. Second, we lent in these datasets. Our analysis identified significant levels introduce the Vulnerability Detection Score (VD-S), a novel of exact copies and cloned vulnerabilities within the datasets. metric designed to measure how well vulnerability detectors This is particularly problematic when one copy appears in the will perform in practice. 
VD-S measures the false negative training set and another copy in the testing set, as performance rate, after the detector has been tuned to ensure the false metrics become unrepresentative of real-world performance positive rate is below a fixed threshold (e.g., 0.5%). Finally, we
and misrepresent the model’s ability to generalize to unseen introduce a pair-wise evaluation method to assess the model’s data. We found that up to 18.9% test samples are leaked from ability to distinguish between a vulnerable code sample and the train set in some benchmarks. its benign (fixed) counterpart, offering deeper insights into Limitation in existing evaluation metrics. Besides the data models’ vulnerability understanding. quality issue, the evaluation metrics used by current bench- Effectiveness of Code LMs in realistic vulnerability detec- marks fail to capture the practical utility of models at VD. tion.WeevaluatesevencodeLMswithvariedsizes,including Accuracy: Many benchmarks report accuracy scores, the state-of-the-art open-source models like StarCoder2 [13] but accuracy is not an appropriate metric for vulnerability andtheproprietarymodelsfromOpenAI[14],onPRIMEVUL. detection, because of the base rate problem (vulnerabilities Our findings paint a sobering picture of the current state of are rare in practice; most code is not vulnerable) and because code LMs in vulnerability detection. Across various models of mismatches in class balance (the proportion of vulnerable and experimental setups, code LMs consistently performed samples in most research datasets does not match the ratio of poorly, as measured on the PRIMEVUL benchmark. This is vulnerable code in real life). For instance, because most sam- in sharp contrast to prior evaluations on prior benchmarks, plesinrealisticbenchmarksarenotvulnerable,itispossibleto which reported seemingly good results. For example, Star- achieve high accuracy by always predicting “not vulnerable”. 
Coder2,whichpreviouslyshowedpromisingperformancewith A high accuracy score does not necessarily signify effective a 68.26% F1 score on BigVul, drastically underperformed detection of security vulnerabilities; it may simply reflect in our assessment, achieving only a 3.09% F1 score on the accurate identification of non-vulnerable cases, or a bias PRIMEVUL. towards predicting “not vulnerable”. Ourfindingshighlighttheineffectivenessofexistingmodels F1: WhiletheF1scoreiswidelyperceivedtobeabetter in vulnerability detection and the misleading nature of previ- metric for assessing classification performance on imbalanced ous benchmarks. This underscores the significance of PRIME- datasets, we argue that it is not appropriate in reality either. VUL in offering a more challenging and realistic evaluation The F1 score (the harmonic mean of precision and recall) environment, exposing the limitations of current models when reflects both false positives and false negatives by combining confronted with real-world vulnerabilities. Additionally, the them into a single penalty. Yet, for VD tools in practice, the introduction of the Vulnerability Detection Score (VD-S) and overwhelming majority of code is not vulnerable, so a critical pairwiseevaluationsfurtherelucidatedthechallengesfacedby challenge is preventing excessive false alarms. The F1 score the models. fails to reflect this asymmetry, so tools with a high F1 score Our attempts at enhancing model performance through may be useless in practice. advanced training techniques, such as class weights and con- Proposed solution. To address the above limitations, we trastive learning, resulted in marginal improvements at best. propose a new VD dataset and novel evaluation guidelines. 
We also evaluated state-of-the-art LLMs, including GPT-3.5 New dataset: We introduce PRIMEVUL to tackle the and GPT-4, where we employed zero-shot prompting and limitations of existing datasets through rigorous data collec- fine-tuning, hoping to leverage their massive pre-training and tion,datanormalization,anddatafilteringprocess.Wefurther model capacity for enhanced performance. However, even introduce two rigorous labeling techniques: (i) PRIMEVUL- these models cannot distinguish vulnerabilities from their NVDCHECK uses expert analysis from CVE entries, and (ii) similar yet benign counterparts. In our pair-wise evaluation, PRIMEVUL-ONEFUNC utilizes unique changes within com- GPT-4 with the chain-of-thought reasoning could not even mits, ensuring high label accuracy. Consequently, PRIMEVUL outperform the random guessing. This indicates that we are not only ensures better label accuracy but also significantly a long way from being able to usefully detect security vul- reduces the possibility of data duplication, thereby offering nerabilities with code language models, and fundamentally a more realistic and less noisy VD benchmark. To this end, new approaches may be needed. These findings also prompt a PRIMEVUL contains 6,968 vulnerable and 228,800 benign reassessmentofthebenchmarksused togaugeprogressinthe functions, covering 140 CWEs while maintaining similar ac- field,emphasizingtheroleofPRIMEVULinadvancingtoward curacy as SVEN, marking a substantial advancement in both more realistic and rigorous evaluations. scale and accuracy compared to previous datasets. To summarize, our contributions in this paper are as below: Novel evaluation guidelines: We propose novel evalua- tion guidelines, to ensure evaluation results will be predictive • We conducted an in-depth analysis of existing datasets, of the real-world performance of these tools. 
First, we sug- uncovering significant flaws, including poor data quality, gest splitting samples chronologically to reflect the evolution low label accuracy, and a high incidence of data duplica- 2tion. Unfortunately, even for the most experienced security experts, • We developed PRIMEVUL, a new vulnerability dataset manually verifying the vulnerability labels of code samples
with high-quality, accurately labeled data and stringent is challenging and time-consuming. Therefore, the manually de-duplication, to offer realistic training and testing data verified datasets, though having higher label accuracy, are not for code LMs. ideal for training the deep neural networks due to the limited • We introduced new evaluation guidelines, including the diversity (e.g., SVEN only covers 9 CWEs) and a limited Vulnerability Detection Score (VD-S) and a pair-wise number of samples (e.g., SVEN only has 1.6k samples). evaluation method, advancing the rigor of model assess- ment for vulnerability detection. B. Label Accuracy • WeevaluatedarangeofcodeLMswith PRIMEVUL.Our Inthissection,wequantifythelabelaccuracyoftheexisting evaluation demonstrates that, despite various attempts benchmarks. at optimization, the performance of current models sig- 1) Experiments: We analyze the label accuracy by ran- nificantly falls short of the requirements for real-world domlysampling50vulnerablefunctionsfromeachbenchmark deployment, highlighting a pressing need for innovative and manually analyzing whether the function indeed contains approaches in the training of code LMs for vulnerability security vulnerabilities. The manual analysis was performed detection. We release our artifact at: https://github.com/ bythreeoftheauthors,includingtworesearcherswithseveral DLVulDet/PrimeVul years of experience in computer security and one senior II. BACKGROUND&CHALLENGES security expert. In this section, we will revisit the existing code-LM-based Human Judgement. We follow the same method from Chen vulnerability detection models and study the core problems et al. [5] to analyze whether each function is vulnerable and that prevent them from being promising and reliable for categorize nonvulnerable functions accordingly. Our human realistic deployment. annotators comprehensively check the commit message that changedthesampledfunction,thefunctionbeforeandafterthe A. 
Existing Data Collection Methods commit, the affiliated CVE, the NVD description, as well as Avastamountofhigh-qualitydataisthekeytosuccessfully thediscussionsamongthedevelopersinsecurityissuetrackers train deep neural networks. Earlier studies [15, 16] train if available. Using these information, our human annotators deep-learning models using synthetic datasets such as SATE confirm 1) whether the commit is related to fixing a security IV Juliet [17] and SARD [18]. However, synthetic datasets vulnerability, and 2) whether the sampled function is indeed do not adequately capture the complex and nuanced nature vulnerable. of vulnerabilities in real-world code [4]. To address this, a Fornonvulnerablefunctions,wecategorizetheminto1)vul- series of benchmarks collect vulnerabilities from real-world, nerability spread across multiple functions, 2) other changes open-source software repositories [4, 5, 8, 9, 19]. Existing to make the fix consistent (relevant, but not vulnerable), and vulnerability datasets suffer from one of the two following 3) irrelevant changes. In the first category, common scenarios issues:(1)automatedlabelingischeapbuttoocoarse-grained, where we cannot tell whether a single function is vulnerable and (2) manual labeling is reliable but labor-intensive. We include examples such as race condition, denial of service, or elaborate on this trade-off below. commandinjectionthatexploitsmultiplefunctions,etc.Inthe Automated Labeling. The first strategy uses heuristics to second category, we often observe changes to nonvulnerable automatically label all the samples so that the size of the functions as a side-effect of fixing vulnerable functions, e.g., dataset can be large enough for training deep neural net- reportingmoreerrorsoutsidethepatchedfunction,orchanging works. For example, BigVul [9], ReVeal [4], CrossVul [11], the ways of calling the patched function. In the last category, CVEfixes [10], and DiverseVul [5] follow this strategy. 
First, themostcommoncasesarechangingspacingandfunctionality they collect vulnerability-fixing commits from open-source while fixing a different vulnerable function. resources, such as the NVD database, Bugzilla, and Debian Majority Vote. As an improvement over Chen et al. [5], we security trackers. Then, they label the before-commit version use three human annotators with majority vote labeling for all of changed functions as vulnerable, and their after-commit datasets except SVEN. We label a function as vulnerable if version and unchanged functions as nonvulnerable. This strat- at least two out of the three human annotators agree that the egyassumesthatthevulnerability-fixingcommitsonlychange functionisvulnerable.Ifanyoneofthethreelabelersisunsure vulnerablefunctionstofixthesecurityflaw.Inourlabelnoise about a function, the security expert will lead a discussion analysis (Section II-B), we find that this is often not the case, among annotators to achieve an agreement. Then, the final which introduces wrong labels for vulnerable functions. decision will be made through a majority vote. Manual Labeling. To improve the quality of the data labels, 2) Results: Table I summarizes our analysis results. We the second strategy is to involve human experts to verify the conduct accuracy analysis over three benchmarks that were commits or functions manually. For example, the authors of originallyconstructedwithsomekindofmanuallabelingfrom SVEN[12]manuallylabeled1,606functions(halfofthembe- prior work: CodeXGLUE, SVEN, and VulnPatchPairs. For ingvulnerable),fromnoisydatasetslikeBigVulandCrossVul. benchmarks that use automated labeling, specifically, BigVul, 3TABLE I: The label accuracy across existing vulnerability vulnerable samples in the training data have a wrong label,
benchmarks and their comparison with two PRIMEVUL auto- the quality of the trained model is rather concerning [21]. matedlabelingtechniques.Resultsshowthatournewlabeling Due to the space limit, we provide a more detailed label techniques achieve a high accuracy on par with SVEN which error analysis in the Supplementary Material I.A. requiresmanuallabeling.Moreover,PRIMEVULcontains16.7 × as many vulnerable C/C++ functions as in SVEN. C. Data Leakage Data leakage has been identified as a significant issue in Benchmark Manual Correct (%) the area of machine learning for code. We consider two types SVEN [12] ✓ 94.0 of leakage: code copy and time travel. Realistic evaluation CodeXGLUE [8, 19] ✓ 24.0 VulnPatchPairs [20] ✓† 36.0 of vulnerability detection models requires no data leakage to ensure their performances are reasonably measured. We BigVul [9] ✗ 25.0∗ CrossVul [11] ✗ 47.8∗ study the data leakage issue of four most frequently used CVEFixes [10] ✗ 51.7∗ vulnerability benchmarks: BigVul, CVEFixes, CodeXGLUE, DiverseVul [5] ✗ 60.0∗ and DiverseVul, and they have been used by more than ten PRIMEVUL-ONEFUNC ✗ 86.0 code-LM-based vulnerability detection models as training and PRIMEVUL-NVDCHECK ✗ 92.0 evaluation material [5–7, 19, 22–27]. † The VulnPatchPairs dataset takes pairs of functions from Data Splits. To study the data leakage issue specifically in CodeXGLUE. The dataset does not involve further manual the setting of fine-tuning for vulnerability detection, we need verification beyond its data resource, CodeXGLUE. to create the train/validation/test splits. For CodeXGLUE, we ∗ Refers to label accuracy numbers in Chen et al. [5]. directly take the original split from the benchmark [28]. For BigVul, we take the public split [29] used by [6, 7, 27]. For CVEFixes and DiverseVul, we follow the methodology intro- CrossVul, CVEFixes, and DiverseVul, we refer to label accu- duced in [5] to randomly split the data with 80%/10%/10%. racy numbers in Chen et al. [5]. 
We will publicly release our 1) Code Copy: One main reason for leakage is data dupli- label accuracy analysis results as part of our artifacts. cation [30] since code data is highly repetitive [31, 32], and As shown in Table I, the benchmarks without manual LMs are known to be good at memorizing the code text [33]. verification have very low accuracy between 25% and 60%, Specifically, leaking exact copies across the training and eval- for vulnerable functions. On the other hand, only one out uation set will inevitably inflate the evaluation performance. of the three prior datasets that used manual labeling method has a high accuracy: SVEN has a 94% label accuracy for Experiments. We study the exact copy of vulnerable func- vulnerable functions. The other two datasets, CodeXGLUE tions. To identify the code copy, we exhaustively compare the and VulnPatchPairs have low accuracy. vulnerable functions in the test set to all training samples. We normalize the formatting characters in the code samples to Surprisingly, the widely used dataset CodeXGLUE [8] eliminate its noisy effect and then identify the exact copy if (a.k.a. Devign [19] dataset) has a label accuracy of only two strings are identical after the normalization. 24% for vulnerable functions, even though Zhou et al. [19] had recruited human annotators to label security-related com- TABLE II: The statistics of data duplication in existing vul- mits. We find that their human annotation on security-related nerability detection benchmarks. commits is highly inaccurate, such that many commits in CodeXGLUE do not fix security vulnerabilities. Moreover, Benchmark De-dup Copy (%) they adopt the same automated labeling policy as BigVul [9] BigVul [9] ✗ 12.7 to label all changed functions as vulnerable. The VulnPatch- CVEFixes [10] ✗ 18.9 Pairs [20] dataset is derived from CodeXGLUE, and as a CodeXGLUE [8] ✗ 0.6 result,itisalsoinaccurate,withalabelaccuracyofonly36%. 
DiverseVul [5] ✓ 3.3 Our labeling policy is more stringent than VulnPatchPairs. PRIMEVUL (Ours) ✓ 0.0 Similar to CodeXGLUE, we find that 24% of functions from VulnPatchPairs are changed by some security-relevant com- Results. Table II reveals that existing benchmarks suffer mits, but the functions are not vulnerable. The vulnerability from significant exact copy, up to 18.9% of samples being is either spread across multiple functions or the function is duplicated. Unfortunately, this simple step is overlooked by changed to make the fix consistent. prior work. Interestingly, we notice that, with hash-based ThemostaccuratedatasetfrompriorworkisSVEN,which deduplication,DiverseVulstillhas3.3%copies.Thisismainly has only 803 vulnerable functions (total ∼1.6k of vulnerable because they did not normalize formatting characters, and the and non-vulnerable functions) across nine CWEs. In compar- same code with varied spacing will be mapped to distinct ison, the other noisy datasets from Table I are much larger. MD5 hashes, failing to be identified as copies. Further, we Previous works have used different noisy datasets to fine-tune manually check the root cause of such duplication, but we codeLMs.CodeLMstrainedwiththesenoisydatasetscannot realize the reasons are varied. For example, in CodeXGLUE, be trusted for realistic deployment. When nearly half of the we notice two different fixing commits targeting the same 4CVE 1 2. Consequently, this vulnerability gets sampled twice resultinapatchedversionofafunctionthatistextuallysimilar while one is in the training set and the other is in the test set. toitsvulnerablecounterpart.CodeLMs,whichprimarilyana-
Differently,inBigVul,weidentifiedexactlythesamefunctions lyzethetextualrepresentationofcode,struggletodifferentiate thatbeingsampledtwicebeforeandaftercommits3 4,having between such closely related versions [20, 34, 35]. Only by contradictinglabels.Suchnoiseswillconfusethemodelduring evaluating models on these paired data, we can expose this training and hurt the model’s performance as a result. weakness of code LMs. 2) Time Travel: Existing datasets also have the issue of III. OURBENCHMARK:PRIMEVUL time travel since they randomly separate functions into train, validation, and test sets. Consequently, it is possible to train As demonstrated in Section II-B, existing vulnerability on future data and test on past data. It is also possible to have benchmarks suffer from two major data quality issues: data the fixed nonvulnerable function in the training set, and the duplicationandlowlabelaccuracy.Toaddresstheseshortcom- older vulnerable function in the test set. ings, we introduce a new vulnerability benchmark, PRIME- In addition, after manual analysis, we find that many code VUL.Weproposeanewautomateddatacollectionpipelineto samples from the same commit (example2) were randomly produce high-quality samples with accurate labels. separatedintotrain,validation,andtestsets.Thiscommitfixes A. Data Merging and Thorough Data De-duplication multiple occurrences of the same CVE in different functions, To build a large database, we start by merging all in the same way. In general, developers tend to fix similar security-relatedcommitsandfunctionschangedbythemfrom issues altogether before merging the changes into the main BigVul[9],CrossVul[11],CVEfixes[10],andDiverseVul[5]. branch. However, training and testing with samples from the We exclude data from Devign/CodeXGLUE [8, 19] because samecommitisunrealisticandleaksinformationfromthetest we discover that a large portion of its commits is unrelated to time to the training time. 
In a realistic setting, the models are trained on the historical data to predict future samples. We de-duplicate code copies as well as functions with D. Evaluation in Practical Settings only formatting differences. For each commit, we first nor- Nearly all code-LM-based vulnerability detection models malize the changed functions before and after commits by use accuracy and F1 score for evaluation. Accuracy measures stripping away characters such as spaces, tabs (“\t”), newline theproportionofcorrectlypredictedsamples(bothvulnerable characters (“\n”), and carriage return characters (“\r”). Then and benign) out of all samples, capturing overall model we compute the MD5 hash of both the pre- and post-commit performance. The F1 score, on the other hand, is a harmonic versions of the changed function. If the pre- and post-commit mean of precision and recall, offering a balanced measure versionsofafunctionresultinidenticalhashvalues,weregard of the model’s ability to identify vulnerable samples without thisfunctionasunchangedanddiscardit.Finally,wecombine excessively misclassifying benign samples. We argue that allremainingfunctions,andfurtherde-duplicatethewholeset accuracyandF1failtocapturepropertiesthatdeveloperscare by the MD5 hashes of the normalized functions. During the about in practice. Rather, developers are generally concerned de-duplication,wemaintainauniquesetofhashes.Ifthehash about (1) the detection error tradeoffs and (2) discriminative of a normalized function is already in the set, we exclude it power over textually similar code samples across vulnerable from further processing. and fixed patches. As a result, PRIMEVUL’s thorough data de-duplication 1) Detection Error Tradeoffs: Balance between false posi- ensures that no vulnerable function from the training set can tivesandfalsenegativesiscriticalwhendeployingvulnerabil- be leaked to the test set (Table II). ity detection tools.
Developers rely on these tools not only to B. More Accurate Data Labeling catchasmanyrealvulnerabilitiesaspossible(minimizingfalse negatives) but also to do so without overwhelming them with We propose two new labeling techniques: PRIMEVUL- falsealarms(minimizingfalsepositives).Thisdualexpectation ONEFUNC and PRIMEVUL-NVDCHECK. reflectsthepracticaltradeoffsinsoftwaredevelopment:missed PRIMEVUL-ONEFUNC: We notice that the previous labeling vulnerabilities can lead to security breaches, while too many methodhaserrorsparticularlywhendealingwithcommitsthat false alarms can lead to alert fatigue, potentially causing real modify multiple functions. Therefore, PRIMEVUL-ONEFUNC issues to be ignored. F1 and accuracy offer little insight into regards a function as vulnerable if it’s the only function the tradeoff between false negatives and positives. changed by a security-related commit. 2) Textually Similar Pairs of Vulnerable & Corresponding PRIMEVUL-NVDCHECK: Since human experts have an- Patch: Security fixes could involve only minor modifications alyzed the CVEs in the NVD database, the vulnerability tothecodeinmanycases,suchasadjustingbuffersizes,fixing description in each CVE entry is a reliable reference to label data types, or adding security checks. These changes often vulnerablefunctions.WedevelopPRIMEVUL-NVDCHECKas the following. First, we link security-related commits to their 1https://github.com/qemu/qemu/commit/71d0770 CVE numbers and the vulnerability description in the NVD 2https://github.com/qemu/qemu/commit/902b27d 3https://github.com/php/php-src/commit/3798eb6 database. We label a function as vulnerable if it satisfies one
4https://github.com/torvalds/linux/commit/6062a8d of the two following criteria: (1) NVD description explicitly 5mentions its name, or (2) NVD description mentions its file as possible (can be measured by False Negative Rate, or name, and it is the only function changed by the security- FNR, where we expect low FNR). Meanwhile, from a related commit in that file. practical perspective, a certain level of false positives can be After applying our two labeling techniques, we obtain two manageablewithinthedevelopmentworkflow,withoutcausing sets of vulnerable functions. Next, we merge the sets and de- alert fatigue (typically captured by False Positive Rate, or duplicate the functions again. We normalize the formatting FPR, where a lower rate is better). Therefore, a metric that characters in the functions, and compute their MD5 hashes focusesonminimizingthefalsenegativeratewithinatolerable to identify and remove duplicates. Subsequently, we label the level of false positives is essential. post-commit versions of these identified vulnerable functions, Tothisend,weproposeVulnerabilityDetectionScore(VD- as well as all other unchanged functions within the same S), that evaluates the False Negative Rate of a vulnerability commits,asbenign.Onlyasubsetofcommitsfromthemerged detector within an acceptable False Positive Rate, i.e., FNR database mentioned in Section III-A, meet the criteria for @ (FPR ≤ r), where r ∈ [0%,100%] is a configurable labelingbyourtechniques.Commitswithoutanyfunctionthat parameter. In this paper, we choose a tolerance rate r =0.5% matches these criteria are excluded from our dataset. to perform the evaluation in Section V. Our pipeline results in a collection of 6,968 vulnerable and 2) Paired Functions and Pair-wise Evaluation: As dis- 228,800 benign functions across 755 projects and 6,827 com- cussed in Section II-D2, evaluating the models on paired mits. 
To assess our labeling accuracy, we conducted a manual functions—vulnerable and benign versions of code—could review following the same process used in Section II-B, with potentially reveal whether a model merely relies on super- ourresultspresentedinTableI.Themostaccuratepriordataset ficial text patterns to make predictions without grasping the SVEN has only 417 vulnerable functions in C/C++, and 386 underlying security implications, indicating areas where the vulnerable functions in Python. Our PRIMEVUL dataset not model needs improvement to reduce the false positives and onlymatchesthelabelaccuracyofSVENbutalsosignificantly false negatives. expands the collection of vulnerable C/C++ functions by We collected 5,480 such pairs in PRIMEVUL, significantly 16.7× compared to SVEN. PRIMEVUL is diverse, containing larger than existing paired datasets [12, 20]. Concretely, we 140 CWEs (15.6× of SVEN). match the vulnerable functions with their patches in PRIME- TABLE III: Statistics of PRIMEVUL VUL to construct such pairs. As we show in Table III, the paired vulnerable functions are fewer than all vulnerable All Paired functions,sincenotallvulnerablefunctionshaveapatch(e.g., Split a patch could delete the vulnerable function), and we only # Vuln. # Benign # Vuln. # Benign include those challenging pairs sharing at least 80% of the Train 5,574 178,853 4,354 4,354 string between the vulnerable and benign version. Dev 699 24,731 562 562 Accordingly, we also propose a pair-wise evaluation Test 695 25,216 564 564 method.Thecoreideaistoevaluatethemodel’spredictionson the entire pair as a single entity, emphasizing the importance IV. NEWEVALUATIONGUIDELINES of correctly identifying both the presence and absence of AsdiscussedinSectionII-C,weneednewmethodstoprop- vulnerabilities in a textually similar context, while recording erly evaluate vulnerability detection models in deployment the model’s concrete predicting behaviors. settings. 
This section proposes new evaluation guidelines. We define four outcomes of the pair-wise prediction: • Pair-wise Correct Prediction (P-C): The model correctly A. Temporal Splits predicts the ground-truth labels for both elements of a pair. Tominimizethedataleakageissueandformulatearealistic • Pair-wise Vulnerable Prediction (P-V): The model incor- train-evaluate setup for vulnerability detection, we split the rectly predicts both elements of the pair as vulnerable. train/validation/testsetofPRIMEVULaccordingtothecommit • Pair-wise Benign Prediction (P-B): The model incorrectly date of the samples. Concretely, we find the original commit predicts both elements of the pair as benign. for each sample and collect the time of that commit, tying • Pair-wiseReversedPrediction(P-R):Themodelincorrectly it with the sample. Then, we sort the samples according to and inversely predicts the labels for the pair. the commit, where the oldest 80% will be the train set, 10% V. EXPERIMENTALRESULTS in the middle will be the validation set, and the most recent 10% will be the test set. We also make sure that the samples Considering the better data labeling and closer alignment fromthesamecommitwillnotbesplitintodifferentsets.This with the real-world data distribution in PRIMEVUL, coupled ensures that the vulnerability detection model is trained using with the introduction of new evaluation techniques, we re- past data and tested over future data. assess code LM’s performance using PRIMEVUL to gauge their performance in a more realistic setting. To this end, we B. More Realistic and Challenging Evaluation evaluate the following three Research Questions: 1) Vulnerability Detection Score: The primary goal in • RQ1. How do open-source code LMs perform on PRIME- vulnerability detection is to catch as many real vulnerabilities VUL? (Section V-B)
6• RQ2. Can employing more advanced training techniques BigVul[9],usingwhichasadirectcomparisontoPRIMEVUL. enhance the performance of code LMs in detecting vulner- Specifically, we additionally fine-tune code LMs on BigVul, abilities? (Section V-C) and evaluate them on both BigVul and PRIMEVUL. • RQ3. Can larger language models (LLMs) improve vulner- Results.TheempiricalresultsareshowninTableV.Therows ability detection performance? (Section V-D) of “Train=PV” and “Test=PV” in each section are results for A. Study Subject code LMs fine-tuned and evaluated on PRIMEVUL, and rows of“Train=BV”arethecomparativeresultsforcodeLMsfine- Datasets. We mainly use PRIMEVUL to conduct our experi- tuned with BigVul. ments.InRQ1,weadditionallyuseBigVul[9]asacasestudy of existing benchmarks due to its popularity [5–7, 27, 29]. TABLE V: Performance of code-LM-based vulnerability de- By comparing it to PRIMEVUL, we illustrate the impact of tection models in different settings. its limitation on training and evaluating code-LM-based VD models. For RQ2 and RQ3, we will focus on PRIMEVUL to Model Train Test Acc↑ F1↑ VD-S↓ P-C↑ P-V↓ P-B↓ P-R↓ improve code LMs performances for VD. BV 95.67 64.93 77.30 24.98 50.90 22.79 1.33 BV CT5 PV 97.00 5.82 95.97 0.18 3.01 96.10 0.71 TABLE IV: The code LMs we will study in this section. 
PV PV 96.67 19.7 89.93 1.06 12.94 84.75 1.24 Model Parameters Arch Methods BV 95.57 62.88 81.77 22.60 48.34 27.83 1.23 BV CB PV 97.04 4.49 95.54 0.35 1.95 96.99 0.71 CODET5[25] 60M Enc-Dec Fine-tune PV PV 96.87 20.86 88.78 1.77 11.35 86.17 0.71 CODEBERT[22] 125M Encoder Fine-tune BV 96.46 65.46 62.30 39.60 23.74 33.24 3.42 UNIXCODER[24] 125M Encoder Fine-tune UC BV PV 97.27 1.94 95.11 0.35 0.35 98.76 0.53 STARCODER2[13] 7B Decoder Fine-tune PV PV 96.86 21.43 89.21 1.60 12.06 85.11 1.24 CODEGEN2.5[36] 7B Decoder Fine-tune BV 96.20 68.26 69.14 35.23 41.98 20.61 2.18 Fine-tune SC2 BV PV 97.09 3.09 96.83 0.89 0.89 97.70 0.53 GPT-3.5[37] >100B Decoder Few-shot Prompt Chain-of-thought PV PV 97.02 18.05 89.64 2.30 8.16 88.30 1.24 GPT-4[14] >100B Decoder Few-shot Prompt Chain-of-thought CG2.5 BV BV 96.57 67.30 61.73 40.84 26.02 29.63 3.51 BV PV 97.23 1.91 95.68 1.24 0.00 98.76 0.00 PV PV 96.65 19.61 91.51 3.01 10.82 84.22 1.95 Models. We will study seven code LMs with varied sizes CT5:CodeT5,CB:CodeBERT,UC:UnixCoder,SC2:StarCoder2,CG2.5: regarding their capabilities for VD, as shown in Table IV. CodeGen2.5.BV:BigVul,PV:PRIMEVUL.VD-SistheVulnerability Specifically, we will fine-tune all open-source models in RQ1 DetectionScoredefinedasfalsenegativerate(FNR)@falsepositiverate and study the advanced training techniques using UnixCoder (FPR)≤0.5%inSectionIV-B1.P-C,P-V,P-B,andP-Rarethemetrics in RQ2. In RQ3, we prompt GPT-3.5 and GPT-4 with two definedinSectionIV-B2toevaluatethemodelsonpairedfunctions. settings:(1)two-shotexamples(2)andchain-of-thoughts[38] Finding-RQ1.1: Code LMs’ performance is overestimated reasoning, and we also fine-tune GPT-3.5 on a subset of PRIMEVUL using the OpenAI API.
on a prior benchmark and perform poorly on PRIMEVUL: ThecomparativeperformanceevaluationofcodeLMsbetween Experimental Settings.Forallexperimentswithopen-source PRIMEVUL and BigVul lays bare a startling truth: the profi- models, we implement our fine-tuning framework following ciencyofthesemodelsisgreatlyoverestimatedbybenchmarks theexistingbenchmarks[5,8],wherewewillusethelearning like BigVul, which fail to mimic the complexity of real- rate of 2×10−5 for all the fine-tuning. For smaller models world vulnerabilities. For example, while StarCoder2 shows withlessthan7Bparameters,wewillfine-tunethemodelsfor a commendable F1 score of 68.26% on BigVul, it plummets 10 epochs, and for 7B models, we fine-tune for four epochs. to a paltry 3.09% on PRIMEVUL. This precipitous drop is For GPT-3.5, we just fine-tune for one epoch, due to the not an isolated case but a trend observed across all models, limited budget. We load the model weights from Hugging exemplified by the observed false negative rates. Face Models5. All training tasks are conducted on a cluster Inaddition,evenwhencodeLMsarefine-tunedon PRIME- withNVIDIAA100GPUs(80GB).ExperimentswithOpenAI VUL, they fail to achieve the same level of performance as models are performed through API using greedy decoding. on BigVul. For instance, when trained on PRIMEVUL, Star- B. RQ1: Performance of Open-Source Code LMs on PRIME- Coder2’s performance shows only a modest improvement in VUL F1scorefrom3.09%to18.05%,whichisstillmarkedlylower than the 68.26% F1 score achieved on BigVul. This persistent InthisRQ,wefine-tuneopen-sourcecodeLMsandevaluate underperformance, even after fine-tuning, suggests that the their performances on PRIMEVUL. To illustrate the limita-
models cannot effectively learn from the more complex and tions of existing VD benchmarks on the model training and evaluation, we reproduce the code LMs’ performances on realistic distribution of vulnerabilities in PRIMEVUL. This stark discrepancy confirms that code LMs trained on existing 5https://huggingface.co/models benchmarks may develop a false sense of security due to the 7benchmarks’ failure to capture the intricate and diverse nature C. RQ2: Exploring to Improve the Performance on PRIME- of vulnerabilities found in the wild. VUL GivencodeLMs’poorperformanceon PRIMEVUL,wede- Finding-RQ1.2:VulnerabilityDetectionScore(VD-S)offers cidedtodelvedeeperintothetrainingprocess,tryingtofigure a more concrete sense of realistic performance: The VD- out whether more advanced training techniques could help S emerges as a pivotal metric, capturing the essence of a code LMs achieve promising performance on PRIMEVUL. model’s capability in real-world settings, where balancing the To this end, we perform analysis to inspect both the chal- FNR and FPR is crucial. For example, CodeBERT reports a lenging samples within PRIMEVUL and monitor the models’ high accuracy of 96.86% and a satisfactory F1 of 62.88% on behaviors during the inference time, carefully studying the BigVul. Relying on these metrics, CodeBERT seems to be an failingpredictionsfromthesemodels.Aftertheseanalysesand acceptablecandidatefordetectingvulnerabilities.However,its a comprehensive literature review, we decided to explore two astonishing false negative rate of 81.77% (as reported by VD- advanced training techniques that have shown effectiveness in S) far exceeds the realistic tolerable limits, revealing that it is helping binary classifications. 
actuallyuselessinpractice,overturningtheconclusionsdrawn 1) Exploration-1: Class Weights: When we delve deeper fromtheaccuracyandF1.CodeLMs’VD-SonPRIMEVULis into analyzing the experimental results, one of the notable even more concerning than on BigVul, highlighting the mod- els’ limitations in accurately identifying true vulnerabilities, differences between BigVul and PRIMEVUL is the ratio of vulnerable samples. As we have mentioned in Section II-B, a a critical factor for real-world applications where missing a lot of samples are mislabeled as vulnerable in BigVul and single vulnerability can have serious repercussions. other resources that constitute PRIMEVUL. After applying In addition, we realize that VD-S is not necessarily cor- our novel labelers, (Section III-B), the ratio of vulnerabil- related with accuracy or F1. The detachment of VD-S from ities significantly decreases. Therefore, we suspect whether traditional metrics also signals a shift in how we should the significantly more imbalanced ratio hinders the learning evaluate code LMs for VD. process [39]. Toverifyourassumptions,weimplementtheweightedloss Finding-RQ1.3: Code LMs are weak at differentiating vul- similar to Chen et al. [5] and integrate it into our fine-tuning nerabilities from their similar but benign counterparts: As framework. The general idea is to give a higher penalty when we have discussed in Section II-D2 and IV-B2, pair-wise models make mistakes on the rare class (i.e., the vulnerable evaluation not only offers a lens into the precision of code samples) by up-weighting the loss value for this class in the LMs but also serves as a stress test for their real-world cross entropy loss. With a higher weight on the rarer class, application viability. 
These metrics crucially reveal whether the imbalance ratio will be less harmful, since the model a model has truly learned to identify security vulnerabilities pays comparable attention to both classes when optimizing or is merely recognizing patterns without comprehending the the loss. To find an optimal weight, we tried several different implications—a distinction that’s vital for the deployment of values: besides the standard binary classification with equal code LMs as a reliable security tool. weights (weight of vulnerable class = 1), we further explored However, our pair-wise evaluation uncovers a significant upweighting the loss for the vulnerable class for 5, 20, 30, deficiency in this aspect. Our results reveal that code LMs and 40 times. Note that the vulnerable to benign ratio in frequently misclassify both functions in a pair as vulnerable PRIMEVUL is roughly 1:32. (P-V) or benign (P-B), indicating an overreliance on textual patterns rather than a substantive understanding of the code’s TABLE VI: The impact of class weights during training. security context. For instance, StarCoder2 reports only 2.30% cases to correctly label both elements in paired functions Weight Acc↑ F1↑ VD-S↓ P-C↑ P-V↓ P-B↓ P-R↓ while misclassifying 4× more cases as both vulnerable and 1 96.86 21.43 89.21 1.60 12.06 85.11 1.24 88.30% pairs as both benign, demonstrating their difficulty in 5 96.24 25.29 90.65 1.77 18.97 78.55 0.71 recognizingthevulnerablepatternsandsubtledistinctionsthat 20 95.28 24.26 88.92 0.89 25.71 72.16 1.24 apatchcanintroduce.Suchunreasonablebehaviorsundermine 30 96.14 24.49 90.07 2.13 18.09 78.01 1.77 40 95.99 26.28 88.49 1.42 22.52 74.82 1.24 the trustworthiness of these models in realistic deployment. 
Thisinsightisalsocrucialasitemphasizestheneedformodels Findings-RQ2.1: Class weights do not fundamentally im- that go beyond surface-level text comprehension to grasp prove code LMs’ performance on PRIMEVUL: Similar to deeper semantic implications of code changes for reliable Chen et al. [5], we observed an increased F1 when applying vulnerability detection. class weights, though not as significant as theirs due to the
difficulty of PRIMEVUL. However, VD-S scores warn us Result-1: Code LMs’ significant underperformance on that such a marginal improvement is far from promising in PRIMEVUL highlights their limitations when faced with the realistic scenario: FNR is oscillating around 90% across realistic, diverse, and challenging vulnerabilities. different weights, and such models could not be trusted to detect security flaws. These results, though disappointing, 8help us exclude the potential impact of the class imbalance, Result-2: Advanced training techniques for binary clas- providing convincing evidence regarding the difficulty of sification, such as contrastive learning and class weights, PRIMEVULandthestruggleofcodeLMsindetectingrealistic couldnotparticularlyimprovecodeLMs’performanceon vulnerabilities. PRIMEVUL, highlighting its realistic difficulty. 2) Exploration-2: Contrastive Learning: Contrastive learn- ing has been proven effective at learning better-quality repre- D. RQ3: Larger Code LMs on PRIMEVUL sentations of text and code [24, 34, 40] since they are able After getting unsatisfying results with advanced training to decrease the cosine similarity among semantically different techniques,westartedtoquestionwhetherthemodelswetried samples,andconsequentlyhelptoimprovethemodels’perfor- so far have too few parameters to solve such a challenging mance in downstream classification tasks. Therefore, we hope benchmark. Therefore, we decided to explore the state-of-the- to study whether contrastive learning could help code LMs to art large language models (LLM) to see whether significantly achieve promising performance on PRIMEVUL. more parameters could bring a performance. Different from the existing works [24, 34] which apply We perform experiments using OpenAI GPT models: GPT- contrastivelearningatthepre-trainingphase,weenforcesuch 3.5 and GPT-4. Considering the cost, we only evaluate these “contrasting”signalstogetherwiththeclassificationobjective. 
models on the paired functions of PRIMEVUL, since it has Specifically, code LMs will be fine-tuned to both classify vul- much fewer samples than the full set while representing nerabilities and contrast representations with distinct seman- the more challenging scenarios in reality. For GPT-3.5, we tics. We implement the objective according to Gao et al. [40] experiment with three settings: two-shot prompting, chain-of- (referred to as CLR), where the model is trained to maximize thought prompting [38], and fine-tuning. We fine-tune GPT- the representation similarity between each sample and the 3.5 for one epoch on all vulnerabilities plus three times more perturbedversionofitselfandminimizethesimilaritybetween randomly sampled benign samples from the train split of two randomly chosen samples. The perturbation is directly PRIMEVUL, as fine-tuning on the full training set will run applied to the code representation through dropouts [41] in out of the budget. For GPT-4, we only consider two-shot and the Transformer model. chain-of-thought prompting since its fine-tuning API has not beenreleasedyet.Moredetailsabouttheprompttemplatewill TABLE VII: The impact of contrastive learning during fine- be discussed in Supplementary Material I.B. tuning UnixCoder for vulnerability detection. TABLE VIII: Results of OpenAI GPT models on PRIMEVUL Finetune Acc↑ F1↑ VD-S↓ P-C↑ P-V↓ P-B↓ P-R↓ paired functions. 
CLS 96.86 21.43 89.21 1.60 12.06 85.11 1.24 Model Method P-C ↑ P-V ↓ P-B ↓ P-R ↓ +CLR 96.83 21.73 90.07 1.77 11.35 85.82 1.06 +CA-CLR 96.64 24.46 90.50 2.30 15.96 81.38 0.35 Two-shot 5.67 13.83 77.84 2.66 GPT-3.5 CoT 6.21 4.79 83.51 5.50 Findings-RQ2.2: Contrastive learning fails to significantly Fine-tune 1.24 5.32 90.96 2.48 improve Code LMs’ performance on PRIMEVUL: Unfortu- Two-shot 5.14 71.63 21.45 1.77 nately, as shown in Table VII, we could not see a significant GPT-4 CoT 12.94 54.26 24.47 8.33 differencebyaddingtheCLRobjective.Wefurtheranalyzethe resultstoseewhatmightgowrong.Onenotablemisalignment RANDOMGUESS - 22.70 26.24 26.42 24.65 wenoticeisthat,sinceCLRfromGaoetal.[40]isnotcrafted for classification tasks, it will distinguish any two samples Results are shown in Table VIII. In general, GPT-3.5 and regardless of whether their labels are the same. Therefore, GPT-4 outperform open-source models for the pair-wise eval- we further improve CLR to be a second approach, called uationeveninthebasictwo-shotpromptingsetting,andchain- Class-awareContrastiveLearning(CA-CLR),whichwillonly of-thoughtreasoningfurtherpushestheperformanceboundary. minimizethesimilaritybetweensampleswithdifferentlabels. However, we realize that such performance is actually no This time, we see a more notable improvement over F1 better than a random guess since the majority of the pairs in and P-C by applying CA-CLR (Table VII). However, the PRIMEVUL still cannot be distinguished by these large SOTA performance change is still marginal. This result empirically code LMs, which might indicate the fundamental weaknesses demonstratescodeLMs’inherentincapabilitytodetectvulner- of code LMs to differentiate subtle vulnerabilities from their abilities.Itisnotonlybecausetherepresentationsaretoosim- benignversions.Furthermore,whenwefine-tuneGPT-3.5,we ilar to draw the classification boundary, but these models fail notice that the model is strongly biased by the 1:3 vulnerable
to identify the vulnerable patterns, so that, even if enlarging to benign ratio and reports even lower performance than the the cosine distance among representations through contrastive prompting approaches, showing a red flag that even such a learning,codeLMsstillfailtodrawtheclassificationboundary large LM (LLM) still fails to capture the vulnerable patterns correctly. but take shortcuts from data instead. 9given the small percentage of mislabeling, we believe all the Result-3: Eventhestate-of-the-artOpenAImodelscould reported results will still hold good. notachieveareliableperformanceon PRIMEVUL,calling for fundamentally novel approaches to improve the task. For the proposed evaluation metric VD-S, there is a con- figurable parameter r to control the maximum false positive rate.Thepracticallyacceptablevaluermightvaryfordifferent scenarios, changing the exact value of VD-S, but we expect VI. DISCUSSIONS&THREATSTOVALIDITY our general conclusions to hold. Discussion. Our study of code LMs in the realm of vulner- For experiments with OpenAI models, we only reported ability detection (VD) reveals that they do not perform well results with default settings, which could vary slightly by enough for real-world applications. Prior evaluations looked changing the hyperparameters. However, we do not expect promising,butourworkrevealstheirsubtleissues:dataquality hyperparameter tweaking would change the conclusion. problems, misleading metrics, and methodology that poorly matches the way in which these models would be used in practice. We highlight below several key areas where current VII. RELATEDWORK code LMs fall short. Throughout the paper, we have reviewed many related a) Need for More Context: Prior work formulates the works. Here, we provide a summary of the remaining ones problem as: given the code of a single function, determine and compare them with our contribution. whether that function contains a security vulnerability. 
How- Code-LM-based Vulnerability Prediction: Two primary ever, this may be asking an impossible question. Determining whether code is vulnerable generally depends on information methodstouseCodeLMsfor VD arefine-tuningandprompt- ing. Fine-tuning adds a randomly initialized binary classi- aboutothercomponentsofthesystemaswell,suchaswhether fication head to the language model and jointly optimizes inputstothefunctionhavealreadybeensanitized,howoutputs all weights based on ground-truth labels. Various approaches, will be used, or what invariants are established by the rest of such as encoder-decoder Transformers [25, 45], encoder-only the system. This focus on function-level analysis without the Transformers [22, 24], and decoder-only Transformers [13, consideration of other contexts (such as interprocedural data 36, 46] have been used for fine-tuning. On the other hand, flows) would make it difficult for even a human to detect prompting-basedmethods[42–44,47]relyonLLMs,typically vulnerabilities, let alone a model. We recommend that the proprietary ones like GPT-4. Previous studies yield mixed problem be reformulated so that the model also has access results: Khare et al. [42] showed that LLMs perform well on to a broader context. To enable such a process, PRIMEVUL synthetic datasets but not promising on real-world datasets. maintains the metadata for the included commits, providing Experimentation with different prompting strategies, notably resources to extract relevant contexts. variations of Chain-of-Thought (CoT) prompts, has further b) Augmenting Security Awareness: Our empirical re- shown promising results [47]. 
Integrating LLMs into larger sults, particularly the shortcomings of code LMs in pair- frameworks has shown promise in detecting specific vulnera- wise evaluations, suggest that these models make decisions bilities, such as Use Before Initialization vulnerabilities [43] primarily based on textual similarity, without considering the and smart contract vulnerabilities [44]. We analyzed many of underlying root causes or fixes of the vulnerabilities. We suggest researchers explore ways to teach code LMs about these Code LM-based VD methods in this paper and showed that in a very realistic setting none of them performs well. security concepts, such as pre-training methods inspired by how we teach human software developers about security, Empirical Analysis of Deep-learning-based Vulnerability or ways to build hybrid systems that combine LMs with Prediction (DLVP): Several works [4, 6, 20, 27, 48] have traditionalsecurityanalysisorprogramanalysistools[42,43]. pointedoutthatwhileDLVPmodelsmakecorrectpredictions, c) Teaching the model to reason about VD: Finally, theydosoforthewrongreasons;theyoftenrelyon“spurious posing vulnerability detection as a binary classification prob- features” that are not the root cause of the vulnerabilities. lem and teaching the Code LMs accordingly might be too Chen et al. [5] find that, through a large-scale evaluation simplistic. This approach banks on the slim hope that a lone involving 26k vulnerable functions across 300 projects and summary token or a condensed representation can embody 150 CWEs, DLVP lacks generalization to unseen projects and all the intricacies of code vulnerabilities—such expectation is still far from being deployed in the industry. Some of the might be overly optimistic. 
Instead, we should decompose the prior works [20, 49] focus on the lack of robustness of ML- VD problem into digestible sub-problems and teach the model based vulnerability detection algorithms against semantically to reason about each step to reach a conclusion [44]. The preserving modifications. Another recent work [27] attempts to slight yet encouraging progress observed with the chain of measure how much models pick up the bug semantics through
thought experiments in Table VIII shows some promise in this interpretability techniques involving the attention mechanism direction. and shows that extra annotation on the bug semantics also Threats to validity. Even with our stringent labeling methods, improves the model’s performance. Our current work comple- label accuracy is less than 100% (see Table I), so there is still ments these lines of work by focusing more on benchmark a small portion of mislabeled data in PRIMEVUL. However, creation and evaluation techniques. 
In Proceedings of the 29th ACM effective vulnerability detection in practical settings. This un- Joint Meeting on European Software Engineering Conference derscorestheurgentrequirementforfundamentallyinnovative and Symposium on the Foundations of Software Engineering, approaches in training Code LMs for security applications, pages 1565–1569, 2021. [12] Jingxuan He and Martin Vechev. Large language models for while also establishing a new benchmark for evaluating their code:Securityhardeningandadversarialtesting.InProceedings efficacy. of the 2023 ACM SIGSAC Conference on Computer and Com- munications Security, CCS ’23, page 1865–1879, New York, ACKNOWLEDGEMENT NY, USA, 2023. Association for Computing Machinery. ThismaterialisbaseduponworksupportedbytheNational [13] Anton Lozhkov, Raymond Li, Loubna Ben Allal, Federico ScienceFoundationundergrants2229876,2154873,2221943, Cassano,JoelLamy-Poirier,NouamaneTazi,AoTang,Dmytro Pykhtar, Jiawei Liu, Yuxiang Wei, Tianyang Liu, Max Tian, 2313055, 1845893, and 2107405, and by the Department of DenisKocetkov,ArthurZucker,YounesBelkada,ZijianWang, Homeland Security, IBM, the Center for AI Safety Compute QianLiu,DmitryAbulkhanov,IndraneilPaul,ZhuangLi,Wen- Cluster, the Noyce Foundation, C3.ai DTI, and the KACST- Ding Li, Megan Risdal, Jia Li, Jian Zhu, Terry Yue Zhuo, UCB Center of Excellence for Secure Computing. Any opin- Evgenii Zheltonozhskii, Nii Osae Osae Dade, Wenhao Yu, ions, findings, conclusions, or recommendations expressed in Lucas Krauß, Naman Jain, Yixuan Su, Xuanli He, Manan Dey,EdoardoAbati,YekunChai,NiklasMuennighoff,Xiangru this material are those of the author(s) and do not necessarily Tang,MuhtashamOblokulov,ChristopherAkiki,MarcMarone, reflect the views of the sponsors. 
Chenghao Mou, Mayank Mishra, Alex Gu, Binyuan Hui, Tri Dao, Armel Zebaze, Olivier Dehaene, Nicolas Patry, Canwen REFERENCES Xu, Julian McAuley, Han Hu, Torsten Scholak, Sebastien Pa- [1] Xinyi Hou, Yanjie Zhao, Yue Liu, Zhou Yang, Kailong Wang, quet, Jennifer Robinson, Carolyn Jane Anderson, Nicolas Cha- Li Li, Xiapu Luo, David Lo, John Grundy, and Haoyu Wang. pados, Mostofa Patwary, Nima Tajbakhsh, Yacine Jernite, Car- Large language models for software engineering: A systematic los Mun˜oz Ferrandis, Lingming Zhang, Sean Hughes, Thomas literature review, 2024. Wolf, Arjun Guha, Leandro von Werra, and Harm de Vries. [2] GitHub. Github copilot: Your ai pair programmer. https: Starcoder 2 and the stack v2: The next generation, 2024. //copilot.github.com/, 2021. [14] OpenAI. Gpt-4 technical report, 2024. [3] Amazon. Amazon codewhisperer: Build applications faster [15] ZhenLi,DeqingZou,ShouhuaiXu,XinyuOu,HaiJin,Sujuan and more securely with your ai coding companion. https: Wang, Zhijun Deng, and Yuyi Zhong. Vuldeepecker: A deep //aws.amazon.com/codewhisperer/, 2023. learning-basedsystemforvulnerabilitydetection.arXivpreprint [4] Saikat Chakraborty, Rahul Krishna, Yangruibo Ding, and arXiv:1801.01681, 2018. Baishakhi Ray. Deep learning based vulnerability detection: [16] Zhen Li, Deqing Zou, Shouhuai Xu, Hai Jin, Yawei Zhu, Arewethereyet. IEEETransactionsonSoftwareEngineering, and Zhaoxuan Chen. Sysevr: A framework for using deep 2021. learning to detect software vulnerabilities. IEEE Transactions [5] Yizheng Chen, Zhoujie Ding, Lamya Alowain, Xinyun Chen, onDependableandSecureComputing,19(4):2244–2258,2021.
and David Wagner. Diversevul: A new vulnerable source code [17] Vadim Okun, Aurelien Delaitre, Paul E Black, et al. Report dataset for deep learning based vulnerability detection. In on the static analysis tool exposition (sate) iv. NIST Special Proceedings of the 26th International Symposium on Research Publication, 500:297, 2013. in Attacks, Intrusions and Defenses, RAID ’23, page 654–668, [18] National Institute of Standards and Technology. Nist software New York, NY, USA, 2023. Association for Computing Ma- assurance reference dataset, Last accessed on March 19, 2023. chinery. [19] Yaqin Zhou, Shangqing Liu, Jingkai Siow, Xiaoning Du, and [6] Benjamin Steenhoek, Md Mahbubur Rahman, Richard Jiles, Yang Liu. Devign: Effective vulnerability identification by and Wei Le. An empirical study of deep learning models for learning comprehensive program semantics via graph neural vulnerability detection. In 2023 IEEE/ACM 45th International networks. Advances in neural information processing systems, ConferenceonSoftwareEngineering(ICSE),pages2237–2248, 32, 2019. 2023. [20] Niklas Risse and Marcel Bo¨hme. Limits of machine learning [7] Michael Fu and Chakkrit Tantithamthavorn. Linevul: A for automatic vulnerability detection, 2023. transformer-based line-level vulnerability prediction. In 2022 [21] Curtis Northcutt, Lu Jiang, and Isaac Chuang. Confident IEEE/ACM 19th International Conference on Mining Software learning: Estimating uncertainty in dataset labels. J. Artif. Int. Repositories (MSR), pages 608–620, 2022. Res., 70:1373–1411, may 2021. [8] Shuai Lu, Daya Guo, Shuo Ren, Junjie Huang, Alexey Svy- [22] Zhangyin Feng, Daya Guo, Duyu Tang, Nan Duan, Xiaocheng atkovskiy, Ambrosio Blanco, Colin Clement, Dawn Drain, Feng, Ming Gong, Linjun Shou, Bing Qin, Ting Liu, Daxin 11Jiang, et al. Codebert: A pre-trained model for programming language models to follow instructions with human feedback, andnaturallanguages. arXivpreprintarXiv:2002.08155,2020. 2022. 
[23] Daya Guo, Shuo Ren, Shuai Lu, Zhangyin Feng, Duyu Tang, [38] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Shujie Liu, Long Zhou, Nan Duan, Alexey Svyatkovskiy, brianichter,FeiXia,EdH.Chi,QuocVLe,andDennyZhou. Shengyu Fu, et al. Graphcodebert: Pre-training code represen- Chain of thought prompting elicits reasoning in large language tations with data flow. arXiv preprint arXiv:2009.08366, 2020. models. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, [24] DayaGuo,ShuaiLu,NanDuan,YanlinWang,MingZhou,and and Kyunghyun Cho, editors, Advances in Neural Information JianYin. UniXcoder:Unifiedcross-modalpre-trainingforcode Processing Systems, 2022. representation.InSmarandaMuresan,PreslavNakov,andAline [39] JustinM.JohnsonandTaghiM.Khoshgoftaar. Surveyondeep Villavicencio, editors, Proceedings of the 60th Annual Meeting learning with class imbalance. Journal of Big Data, 2019. of the Association for Computational Linguistics (Volume 1: [40] TianyuGao,XingchengYao,andDanqiChen.SimCSE:Simple Long Papers), pages 7212–7225, Dublin, Ireland, May 2022. contrastive learning of sentence embeddings. In Empirical Association for Computational Linguistics. Methods in Natural Language Processing (EMNLP), 2021. [25] Yue Wang, Weishi Wang, Shafiq Joty, and Steven CH Hoi. [41] Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Codet5: Identifier-aware unified pre-trained encoder-decoder Sutskever,andRuslanSalakhutdinov. Dropout:asimplewayto models for code understanding and generation. arXiv preprint preventneuralnetworksfromoverfitting. J.Mach.Learn.Res., arXiv:2109.00859, 2021. 15(1):1929–1958, jan 2014. [26] Yue Wang, Hung Le, Akhilesh Gotmare, Nghi Bui, Junnan Li, [42] Avishree Khare, Saikat Dutta, Ziyang Li, Alaia Solko-Breslin, andStevenHoi.CodeT5+:Opencodelargelanguagemodelsfor RajeevAlur,andMayurNaik. Understandingtheeffectiveness code understanding and generation. In Houda Bouamor, Juan of large language models in detecting security vulnerabilities. 
Pino,andKalikaBali,editors,Proceedingsofthe2023Confer- arXiv preprint arXiv:2311.16169, 2023. ence on Empirical Methods in Natural Language Processing, [43] HAONANLI,YUHAO,YIZHUOZHAI,andZHIYUNQIAN. pages 1069–1088, Singapore, December 2023. Association for Enhancing static analysis for practical bug detection: An llm- Computational Linguistics. integratedapproach. InProceedingsofProceedingsoftheACM [27] Benjamin Steenhoek, Md Mahbubur Rahman, Shaila Sharmin, onProgrammingLanguages(PACMPL),IssueOOPSLA,2024. and Wei Le. Do language models learn semantics of code? a [44] Yuqiang Sun, Daoyuan Wu, Yue Xue, Han Liu, Wei Ma, case study in vulnerability detection, 2023. Lyuye Zhang, Miaolei Shi, and Yang Liu. Llm4vuln: A [28] Microsoft. Codexglue – defect detection, 2019. unified evaluation framework for decoupling and enhancing [29] Benjamin Steenhoek. Hugging face datasets, 2024. llms’vulnerabilityreasoning. arXivpreprintarXiv:2401.16185, [30] Miltiadis Allamanis. The adverse effects of code duplication 2024. in machine learning models of code. In Proceedings of the [45] Wasi Ahmad, Saikat Chakraborty, Baishakhi Ray, and Kai- 2019 ACM SIGPLAN International Symposium on New Ideas, Wei Chang. Unified pre-training for program understanding NewParadigms,andReflectionsonProgrammingandSoftware, and generation. In Kristina Toutanova, Anna Rumshisky, Luke
Onward! 2019, page 143–153, New York, NY, USA, 2019. Zettlemoyer, Dilek Hakkani-Tur, Iz Beltagy, Steven Bethard, Association for Computing Machinery. RyanCotterell,TanmoyChakraborty,andYichaoZhou,editors, [31] Miltiadis Allamanis, Earl T. Barr, Christian Bird, and Charles Proceedings of the 2021 Conference of the North American Sutton. Learningnaturalcodingconventions. InProceedingsof Chapter of the Association for Computational Linguistics: Hu- the 22nd ACM SIGSOFT International Symposium on Founda- man Language Technologies, pages 2655–2668, Online, June tions of Software Engineering, FSE 2014, page 281–293, New 2021. Association for Computational Linguistics. York, NY, USA, 2014. Association for Computing Machinery. [46] Starcoder: may the source be with you! Transactions on Ma- [32] Miltiadis Allamanis, Earl T. Barr, Premkumar Devanbu, and chine Learning Research, 2023. Reproducibility Certification. CharlesSutton. Asurveyofmachinelearningforbigcodeand [47] SaadUllah,MingjiHan,SaurabhPujar,HammondPearce,Ayse naturalness. ACM Comput. Surv., 51(4), jul 2018. Coskun, and Gianluca Stringhini. Can large language models [33] Md Rafiqul Islam Rabin, Aftab Hussain, Mohammad Amin identifyandreasonaboutsecurityvulnerabilities?notyet.arXiv Alipour,andVincentJ.Hellendoorn. Memorizationandgener- preprint arXiv:2312.12575, 2023. alization in neural code intelligence models. Information and [48] Antonio Valerio Miceli Barone, Fazl Barez, Shay B. Cohen, Software Technology, 153:107066, 2023. and Ioannis Konstas. The larger they are, the harder they [34] Yangruibo Ding, Saikat Chakraborty, Luca Buratti, Saurabh fail: Language models do not recognize identifier swaps in Pujar, Alessandro Morari, Gail Kaiser, and Baishakhi Ray. python. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Concord: Clone-aware contrastive learning for source code. 
In Okazaki,editors,FindingsoftheAssociationforComputational Proceedings of the 32nd ACM SIGSOFT International Sym- Linguistics: ACL 2023, pages 272–292, Toronto, Canada, July posium on Software Testing and Analysis, ISSTA 2023, page 2023. Association for Computational Linguistics. 26–38,NewYork,NY,USA,2023.AssociationforComputing [49] Saad Ullah, Mingji Han, Saurabh Pujar, Hammond Pearce, Machinery. Ayse Coskun, and Gianluca Stringhini. Can large language [35] AlexGu,Wen-DingLi,NamanJain,TheoXOlausson,Celine models identify and reason about security vulnerabilities? Not Lee,KoushikSen,andArmandoSolar-Lezama. Thecounterfeit yet, December 2023. conundrum: Can code language models grasp the nuances of [50] Roland Croft, M Ali Babar, and Mehdi Kholoosi. Data quality their incorrect generations? arXiv preprint arXiv:2402.19475, forsoftwarevulnerabilitydatasets. In2023IEEE/ACM45thIn- 2024. ternational Conference on Software Engineering (ICSE), 2023. [36] Erik Nijkamp, Hiroaki Hayashi, Caiming Xiong, Silvio Savarese, and Yingbo Zhou. Codegen2: Lessons for training llms on programming and natural languages, 2023. [37] Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright,PamelaMishkin,ChongZhang,SandhiniAgarwal, KatarinaSlama,AlexRay,JohnSchulman,JacobHilton,Fraser Kelton, Luke Miller, Maddie Simens, Amanda Askell, Peter Welinder,PaulChristiano,JanLeike,andRyanLowe. Training 12SUPPLEMENTARYMATERIAL they are altered solely to align with security updates. One case12 is thewma_decode_initfunctionthatinvokesavulnerablefunction A. Detailed Label Error Analysis init_vlc. In our analysis, the function wma_decode_init is benign. As demonstrated in Section II.B and Section III.B of the main paper, we assess 50 vulnerable functions randomly sampled from eachdataset,includingCodeXGLUE[8,19],VulnPatchPairs[20]and SVEN [12], along with vulnerable functions identified by our two techniques, PRIMEVUL-ONEFUNC and PRIMEVUL-NVDCHECK. 
ThemanualanalysisresultsareshowninTable1ofthemainpaper. In this section, we provide a more detailed analysis of the wrongly labeled functions. Chen et al. [5] identify three primary sources of labeling er- rors. First, irrelevant functions—those subject to formatting, non- functionalalterationsnotlinkedtosecurityfixes,orcontainedwithin commits unrelated to security issues—were incorrectly tagged as vulnerable. Second, the vulnerability may spread across multiple functions,whichdoesnotmatchourgoaloftrainingneuralnetworks to learn whether a single function is vulnerable. Third, benign functions undergoing modifications during vulnerability fixes, such asparameterlistadjustmentsforconsistencywithalteredvulnerable functions,wereerroneouslymarkedasvulnerable.Buildingonthese three categories of labeling errors, we dissect the inaccuracies iden- tifiedinourmanualanalysis,withthedetailedbreakdownpresented in Table IX. Our manual analysis reveals that only 24% of the 50 data points evaluated from CodeXGLUE and 36% from VulnPatchPairs are genuinelyvulnerable.ThisisnoteworthyconsideringCodeXGLUE’s manual efforts in labeling security-related commits and VulnPatch- Pairs is built on CodeXGLUE. Contrasting our findings, Croft et al. [50] acknowledge a labeling accuracy issue with CodeXGLUE, yet their review finds 80% of their sampled vulnerable functions correctly labeled. This divergence largely stems from our more stringent criteria for identifying vulnerable functions. First, after a more careful examination, we find that 58% of the samples from CodeXGLUE and 40% from VulnPatchPairs are irrelevant to security and most of them originate from commits unrelated to security issues. Examples include commits mentioning
“instrumentmemorymanagement”6or“removead-hocleakchecking code”.7 Previous human annotators may misinterpret these commits as security-related due to certain keywords in the commit messages, despite their irrelevance to actual security issues. Additionally, we discover examples like commits focused solely on code migration8 or operating system compatibility improvements9, which are not Fig. 1: The template for the two-shot prompt. security-related at all. Second, contrary to the approach taken by Croft et al. [50], our methodology labels a function as vulnerable only if it indepen- dently constitutes a security risk. For instance, addressing a race condition, as seen in certain commits10, demands a comprehensive understanding of the system architecture, making it improper to assess a function’s vulnerability in isolation. Previous annotators often mark all functions associated with such race conditions as vulnerable.Anothercase11 involvesadenial-of-service(DoS)vulner- ability linked to the repetitive invocation of the recvmsg function. The qio_channel_websock_encode function, which merely shifts some values, does not directly lead to a DoS threat. Thus, we categorize qio_channel_websock_encode as non-vulnerable, diverging from prior analyses. Third,Croftetal.[50]labelthecallersofvulnerablefunctionsas vulnerable.Conversely,weclassifysuchfunctionsasbenignbecause 6https://github.com/qemu/qemu/commit/cd245a1 7https://github.com/qemu/qemu/commit/7d1b009 Fig. 2: The template for the chain-of-thought prompt 8https://github.com/qemu/qemu/commit/60fe637 9https://github.com/qemu/qemu/commit/b981289 10https://github.com/qemu/qemu/commit/c5a49c6 11https://github.com/qemu/qemu/commit/eefa3d8 12https://github.com/FFmpeg/FFmpeg/commit/073c259 13TABLE IX: Detailed breakdown for label error analysis. The error categories are adopted from Chen et al. [5]. 
WrongLabel Benchmark CorrectLabel VulnerabilitySpread RelevantConsistency Irrelevant AcrossMultipleFunctions SVEN[12] 94% 0% 0% 6% CodeXGLUE[8,19] 24% 18% 0% 58% VulnPatchPairs[20] 36% 10% 14% 40% PRIMEVUL-ONEFUNC 86% 4% 4% 6% PRIMEVUL-NVDCHECK 92% 4% 2% 2% B. Prompts for Open AI Models In this section, we show the template we used for prompting the OpenAI models. Figure1showsthetemplateoftwo-shotprompting,wherewestart fromasystemprompttowarmupthemodelwiththetaskitwilldeal with. Then it is followed by one benign example and one positive example. In the end, the new code to be predicted will be wrapped into the position <INSERT_NEW_CODE_HERE>. Figure 2 shows the template of chain-of-though reasoning for detecting the vulnerability. Similarly, the new code to be predicted will be wrapped into the position <INSERT_NEW_CODE_HERE>. 14
2403.19096 SCALE: Constructing Structured Natural Language Comment Trees for Software Vulnerability Detection Xin-ChengWen CuiyunGao∗ ShuzhengGao HarbinInstituteofTechnology HarbinInstituteofTechnology TheChineseUniversityofHongKong Shenzhen,China Shenzhen,China HongKong,China xiamenwxc@foxmail.com gaocuiyun@hit.edu.cn szgao23@cse.cuhk.edu.hk YangXiao MichaelR.Lyu ChineseAcademyofSciences TheChineseUniversityofHongKong Beijing,China HongKong,China xiaoyang@iie.ac.cn lyu@cse.cuhk.edu.hk ABSTRACT Xin-ChengWen,CuiyunGao∗,ShuzhengGao,YangXiao,andMichael Recently,therehasbeenagrowinginterestinautomaticsoftware R.Lyu.2024.SCALE:ConstructingStructuredNaturalLanguage vulnerabilitydetection.Pre-trainedmodel-basedapproacheshave CommentTreesforSoftwareVulnerabilityDetection.InProceed- demonstratedsuperiorperformancethanotherDeepLearning(DL)- ingsofACMSIGSOFTInternationalSymposiumonSoftwareTesting basedapproachesindetectingvulnerabilities.However,theexist- andAnalysis(ISSTA2024).ACM,NewYork,NY,USA,13pages. ingpre-trainedmodel-basedapproachesgenerallyemploycode sequencesasinputduringprediction,andmayignorevulnerability- 1 INTRODUCTION relatedstructuralinformation,asreflectedinthefollowingtwo Nowadays,modernsoftwaredevelopmentissignificantlyafflicted aspects.First,theytendtofailtoinferthesemanticsofthecode bytheprevalenceofsourcecodesecurityvulnerabilities[42].For statementswithcomplexlogicsuchasthosecontainingmultipleop- instance,accordingtoIBM,theyear2023witnessedapeakinthe eratorsandpointers.Second,theyarehardtocomprehendvarious averagecostofdatabreaches,reachingUS$4.45million[30].Over codeexecutionsequences,whichisessentialforprecisevulnerabil- thepastdecade,therehasbeenasubstantialincreaseinthequantity itydetection. ofidentifiedCommonVulnerabilitiesandExposures(CVEs)[2]. 
Tomitigatethechallenges,weproposeaStructuredNaturalLan- Specifically,duringthe2022,theidentifiedCVEnumberreached guageCommenttree-basedvulnerAbiLitydEtectionframework 25,227[49],witha25.1%increaseoverthenumberofvulnerabili- basedonthepre-trainedmodels,namedSCALE.Theproposed tiesdetectedin2021.Therefore,itbecomesimperativetodevelop StructuredNaturalLanguageCommentTree(SCT)integratesthe effectivemethodsfordetectingthesesoftwarevulnerabilities. semanticsofcodestatementswithcodeexecutionsequencesbased ConventionalProgramAnalysis(PA)-basedvulnerabilitydetec- ontheAbstractSyntaxTrees(ASTs).Specifically,SCALEcomprises tionmethodsheavilyrelyonuser-definedrulesandspecifications threemainmodules:(1)CommentTreeConstruction,whichaimsat toidentifyvulnerabilities,suchasINFER[16]andCheckMarx[31], enhancingthemodel’sabilitytoinferthesemanticsofcodestate- whichmakesthemlabor-intensiveandtime-consuming.Recently, mentsbyfirstincorporatingLargeLanguageModels(LLMs)for DL-basedmethodshaveachievedgreatsuccessastheycanreduce commentgenerationandthenaddingthecommentnodetoASTs. thedependenceonexpertknowledgeandofferabroaderspectrum (2)StructuredNaturalLanguageCommentTreeConstruction,which ofcapabilitiesindetectingvarioustypesofsoftwarevulnerabili- aimsatexplicitlyinvolvingcodeexecutionsequencebycombining ties[13].EarlyDL-basedmethodsuseConvolutionalNeuralNet- thecodesyntaxtemplateswiththecommenttree.(3)SCT-Enhanced works(CNNs)[61,62],RecurrentNeuralNetworks(RNNs)[38,48] Representation,whichfinallyincorporatestheconstructedSCTsfor andGraphNeuralNetworks(GNNs)[10,65]forsupervisedtrain- wellcapturingvulnerabilitypatterns.Experimentalresultsdemon- ing.However,theperformanceofthesemodelscanbelargelylim- stratethatSCALEoutperformsthebest-performingbaseline,in- ited due to the scarcity of vulnerability data [45]. 
Recently, the cludingthepre-trainedmodelandLLMs,withimprovementsof adventofpre-trainedmodelshasfurtheradvancedthisfield.These 2.96%,13.47%,and3.75%intermsofF1scoreontheFFMPeg+Qemu, modelsaretrainedonmassiveopen-sourcecoderepositoriesand Reveal,andSVulDdatasets,respectively.Furthermore,SCALEcan havepossessedavastgeneralprogrammingknowledge.There- beappliedtodifferentpre-trainedmodels,suchasCodeBERTand centstudiesmainlyresorttothepre-trainingfine-tuningparadigm UniXcoder,yieldingtheF1scoreperformanceenhancementsrang- whichinjectsvulnerabilityknowledgebyfurthertrainingmodels ingfrom1.37%to10.87%. onvulnerabilitydatasets.Forexample,EPVD[63]andSVulD[43] achievestate-of-the-artperformanceforvulnerabilitydetection. ∗Correspondingauthor.TheauthorisalsoaffiliatedwithPengChengLaboratoryand Althoughachievingpromisingperformance,theeffectivenessof GuangdongProvincialKeyLaboratoryofNovelSecurityIntelligenceTechnologies. thepre-trainingtechniquesisstilllimited,embodiedintwoaspects: (1)Thepre-trainedmodelstendtofailtoinferthesemanticsofthe codestatementswithcomplexlogicsuchasthosecontainingmultiple ISSTA2024,16-20September,2024,Vienna,Austria operatorsandpointers.Thepreviousstudies[46,56]haveshown
©2024AssociationforComputingMachinery. thatthe“reasoninggap"inpre-trainedmodelsposesasignificant 4202 raM 82 ]ES.sc[ 1v69091.3042:viXraISSTA2024,16-20September,2024,Vienna,Austria TrovatoandTobin,etal. dEtection framework based on the pre-trained models, named 1 int net_init_tap(const Netdev *netdev, ...) { ... ... SCALE. The proposed Structured Natural Language Comment 16 if (tap->has_fd) { Tree(SCT)integratesthesemanticsofcodestatementswithcode 17 if (tap->has_ifname || tap->has_script ||tap- >has_downscript || tap->has_vnet_hdr ||...) { executionsequencesbasedontheAbstractSyntaxTrees(ASTs). 18 error_setg(errp, "ifname=, ..."); SCALEhasthreemainmodules:(1)CommentTreeConstruction. 19 return -1; } 20 fd = monitor_fd_param(cur_mon, tap->fd, &err); SCALEmakesthefirstattempttoincorporatetheLargeLanguage 21 if (fd == -1) { Model(LLM)’sgeneralknowledgeforcommentgenerationinthe 22 error_propagate(errp, err); 23 return -1; } vulnerabilitydetectiontaskandfurthergeneratesthecomment ... ... 27 if (err) { tree,whichenhancesthemodel’sabilitytoinferthesemanticsof 28 error_propagate(errp, err); codestatements.(2)StructuredNaturalLanguageCommentTree 29 return -1; } 30 } else if (tap->has_fds) { Construction,whichexplicitlyinvolvescodeexecutionsequenceby 31- char **fds = g_new0(char *, MAX_TAP_QUEUES); combiningthecodesyntaxtemplateswiththecommenttree.(3) 32- char **vhost_fds = g_new0(char *, MAX_TAP_QUEUES); + char **fds; SCT-EnhancedRepresentation,whichincorporatestheconstructed + char **vhost_fds; SCTsforwellcapturingvulnerabilitypatterns. 33 int nfds, nvhosts; 34 if (tap->has_ifname || tap->has_script || tap- ToevaluateSCALE,weusethreewidely-useddatasetsinvulner- >has_downscript...) { abilitydetection:FFMPeg+Qemu[65],Reveal[10],andSVulD[43]. ... ... 36 return -1; } WecompareSCALEwithelevenexistingvulnerabilitydetection + fds = g_new0(char *, MAX_TAP_QUEUES); + vhost_fds = g_new0(char *, MAX_TAP_QUEUES) methods.ExperimentalresultsdemonstratethatSCALEoutper- ... ... 
formsthestate-of-the-artbaseline,withimprovementsof2.96%, 39 if (tap->has_vhostfds) {... } 13.47%,and1.17%intermsofF1scoreonthethreedatasets,re- spectively.Furthermore,SCALEcanbeappliedtodifferentpre- trainedmodels,yieldingF1scoreperformanceenhancementsrang- Figure1:Acodeexamplethatpresentsamemoryleakvul- ingfrom1.37%to10.87%.Theseresultsunderscoretheeffectiveness nerabilityintheQemuproject,andismisclassifiedbyUniX- ofSCALEinenhancingthedetectionofsoftwarevulnerabilities. coderasnon-vulnerable.Theredandgreenlinesrepresent Insummary,themajorcontributionsofthispaperaresumma- thepatchedcodesegmentsbeforeandafterfixed,respec- rizedasfollows: tively. (1) Tothebestofourknowledge,weproposeanovelstructured naturallanguagecommenttree,whichenhancesthemodel’s challenge.Itmeansthatthesemodelsstruggletoinferthelogicof abilitytoinfercodesemanticsbyintegratingcodecomments thesourcecode,suchascontainingmultipleoperatorsandpointers, andcodesyntaxtemplates. whereasingleinferenceerrormayresultintheoppositeprediction. (2) WeproposeSCALE,anovelvulnerabilitydetectionframe- Differentfromnaturallanguage,duetothestrictrulesandsyntax, workforwellcapturingvulnerabilitypatternsbyenhancing sourcecodemayneedmultipleabstractsymbolsinastatementto coderepresentationwiththestructurednaturallanguage expressasimpleintent.Forinstance,thecodesnippetshownin commenttree. Figure1fromtheQemu[4]projectpotentiallycausesamemory (3) Weperformanextensiveevaluationonthreepopulardatasets, leakvulnerabilityinthenet_init_tap()function.InLines31- andtheresultsdemonstratetheeffectivenessofSCALEin 32,thecodesnippetallocatesmemoryforthearraysofcharacter softwarevulnerabilitydetection. 
pointersfdsandvhost_fds.However,ifanyoftheseerrorchecks failedaftertheallocationinLines34-36,thefunctionwouldexit withoutproperlydeallocatingthesearrays,leadingtoamemory 2 BACKGROUNDANDRELATEDWORK leakvulnerability.Inthisexample,thecomplexlogicofthesource Weclassifytheexistingvulnerabilitydetectionmethodsintofour codeinLine34,ischaracterizedbytheuseofmultipleoperators types:Programanalysis-based,Supervised-based,Pre-trainedmodel- (||and->),leadingtoerroneouspredictions.Itpresentschallenges basedandLLM-basedmethods,asillustratedinFigure2. forpre-trainedmodelsindetectingsuchvulnerabilitypatterns. (2) The pre-trained models are hard to capture the code execu- 2.1 ProgramAnalysisMethods tionsequences.Sincepre-trainedmodelsprimarilyutilizecodese- quencesasinput,theyarehardtocomprehendtheinherentlynon- Numerousprogramanalysismethodologieshavebeenproposed sequentialandvariouscodeexecutionsequencesofsourcecode, and demonstrated their effectiveness in vulnerability detection,
suchascommonly-usedifandreturnstatements.Forinstance, suchasCheckMarx[31],FlawFinder[60],PCA[34]andRATs[6]. theexampleshowninFigure1containsseveralifstatementsand Thesemethodologiestypicallyemploypre-definedrulesorpatterns returnstatements,inwhichthespecificuseof ifinline34and toidentifyimproperoperationswithinsourcecode.Figure2aillus- return in line 36 directly causes the vulnerability. These state- tratesanexampleoftherulesproposedbySaber[50],whichusethe mentsnecessitatemultipleconditionalifstatementstotriggera symbolicrules[7,32]tosimulateprogramminglogicinsourcecode. return -1.Itischallengingforthepre-trainedmodelstopredict However,thecreationofwell-definedvulnerabilityrulesorpatterns thevulnerabilitywithoutwellcapturingcodeexecutionsequences. heavilyreliesonexpertknowledge[35,36],makingitchallenging Toaddressthelimitationsabove,inthispaper,weproposea tocoverallpotentialcases.Furthermore,thecomplexprogram- StructurednaturallanguageCommenttree-basedvulnerAbiLity minglogicinherentinreal-worldsoftwareprojectshinderstheSCALE:ConstructingStructuredNaturalLanguageCommentTreesforSoftwareVulnerabilityDetection ISSTA2024,16-20September,2024,Vienna,Austria Code C oSCdtroeud Steu t rrSuettr uGurtr eua rp Geh r Gaprahph S1S1 S: 1 S: 1= : = .:..= .. =.. ..... Featur·· e· ··· ·· .· .. 
S1 +++ + ++++ C1C1 C1S C2 1S2 S¬2SC2¬1C ¬1 C¬1C1 ··· ··· ··· SS· n2·· ··· ··· SouSrScoeo uS u orccrouecdr e cce eoc do ceod de ePre MP-tr orP e MaP d-rir eten oMer Ml- ae d-t t oid er onra d l a de i ei ed n n l l e e d d S ouSrocueSS ro o ccueou r dr ccceoee d c ec oo dd ePeroPmropPmPtropro mtm ptpt S3S3 S3S3 S5S5 S5S5 C2C2 C2C2 2 : 2 :22 :=22 : =2 =2 = GNNDG MLNG NMoNd MNoed loMedloedlel FineF-intFuFeiinn-nteieun--ngttuiunnngiinngg LLMLLMLLLMLM S4S4 S41 S:41 :1 1 :=11 := 1 =1 = AnAswnseAwArn nefsosrw wrfe oerr r f ofor r ( (, , (1 )(,1 = ),1 = )11=) ⋀1= ⋀21 ⋀12 ⋀2 2 ClasCCslilafaCiseslssariisffiiseeifrrier ClasCslCiafClisaelsasrifssisiefirfieierr clacslsaifscicslcaliaafsitscsisaiofitinfciioacnatitoionn ( (, ,(2 )(2 , = ),2 =¬)2 =¬)1= ¬⋁1( ¬⋁ 1 1(⋁1 ⋀(⋁1 (⋀12 ⋀)12 ⋀)2 )2) (a)Programanalysismethods (b)Supervisedmethods (c)Pre-trainedmodelmethods (d)LLM-basedmethods Figure2:Fourtypesofexistingvulnerabilitydetectionmethods. manualidentificationoftheserules,underminingtheperformance suchasCodeBERT[18],canbeutilizedtodetectvulnerabilities. oftraditionalprogramanalysis-basedapproaches. CodeT5 [55] and UniXcoder [25] are designed to support both code-relatedclassificationandgenerationtasks.Byleveragingthe 2.2 SupervisedMethods knowledgeencapsulatedinpre-trainedmodels,thesepre-trained model-basedapproachesachievethebestperformanceinvulnera- WiththeadventofDeepLearning(DL),numeroussupervisedmeth- bilitydetection.EPVD[63]proposesanexecutionpathselection odshavebeenproposedthatutilizeDL-basedmodelstocapture algorithmandadoptsapre-trainedmodeltolearnthepathrep- programsemanticsfortheidentificationofpotentialsoftwarevul- resentations.SVulD[43]constructscontrastivepairedinstances nerabilities,whichmainlyincludethesequence-based[38,39]and andusesthepre-trainedmodeltolearndistinguishingsemantic graph-based[10,37,65]approaches.Figure2billustratestheprocess representations. 
ofthesemethods.Theytypicallyextractfeaturesfromsourcecode However,thepre-trainedmethodsgenerallyemploycodese- andadopttheDLmodelsforclassification.Forinstance,VulDeeP- quences as input during prediction. They may fail to infer the ecker[39]andSySeVR[38]usethebidirectionalLongShort-Term semanticsofthecodestatementwithcomplexlogicandhardtocap- Memory(LSTM)networkforvulnerabilitydetectionandextract turethecodeexecutionsequence.Inthiswork,weaimtomitigate thecodegadgetsfromthesourcecode. theissuesofpre-trainedmodelsforbetterlearningthevulnerability- Then,graph-basedmethodshavegainedsignificantpopularity relatedstructuralinformation. duetotheirinterpretability,whichhasdemonstratedmoreeffec- tivenessthansequence-basedmethods.Theyextractstructured representationsfromthesourcecode,suchasAST,CFG,DFG,and CodePropertyGraphs(CPG)[54].TheythenemployGraphNeural 2.4 LLM-basedMethods Networks(GNNs)[53]modelstolearnthegraphrepresentation Inrecentyears,LargeLanguageModels(LLMs)havegainedpromi- forclassification.Incomparisontotraditionalprogramanalysis- nence[8]andwidespreadadoptioninthefieldsofNaturalLanguage basedapproaches,thesemethodscanautomaticallyextractimplicit Processing(NLP)[40]andSoftwareEngineering(SE)[28].Figure2d vulnerabilitypatternsfrompreviouslyvulnerablecode,eliminat- illustratestheprocessoftheseLLMs[11,21].Notableexamples ingtheneedforexpertinvolvement.However,thesemethodsare
includetheseriesofGPTmodelsproposedbyOpenAI,suchasChat- constrained by semantic embedding and overly complex graph GPT[11]andGPT-4[44],aswellastheLLaMAmodelsintroduced structures. byMeta,includingLLaMA[51]andLLaMA2[52]. Furthermore,beyondthesegeneral-purposeLLMs,specificLLMs 2.3 Pre-trainedModelMethods trainedoncoderepositorieshavealsoachievedparametersinthe WhileGNN-basedmethodshavedemonstratedsatisfactoryperfor- billion-scalerange,suchasINCODER[19],StarCoder[33],and mance,theynecessitatelargeandhigh-qualitydatafortraining[14], CodeLlama[47].Forexample,CodeLlama[47],withitssubstantial whichcanbechallengingtoprocureinreal-worldscenarios[59]. 34-billion-parametermodel,excelsincodegenerationandcomple- Analternativeapproachinvolvestheuseofpre-trainedmodels tiontasks.Thesemodelshaveconsistentlydemonstratedcommend- asthefoundationalbackbone,whicharethenfine-tunedforthe ableperformanceacrossaspectrumofcode-relatedintelligence specificdownstreamtask.Figure2cillustratestheprocessofpre- tasks[23]. trainedmodel-basedmethods[20,22,26,64].Thesemethods,which However,theseLLMsfacesignificantchallengeswhenapplied donotrequireexpertinvolvementorthegenerationofstructured to software vulnerability detection [21]. It is mainly caused by graphs,utilizesourcecodeasinput.Theythensimplyfine-tune individual code snippets often containing numerous undefined thepre-trainedmodelandonlytraintheclassifierforvulnerability identifiersandLLMsoftenlackdomainknowledgeforvulnerability detection.Allpre-trainedcodemodelswithencodercomponents, detection.ISSTA2024,16-20September,2024,Vienna,Austria TrovatoandTobin,etal. 3 PROPOSEDFRAMEWORK Table1:ThesymbolicrulesforconstructingtheSCT.“[-]”in- dicatesthenodevaluetoreplaceand“-”denotestheoriginal Inthissection,weelaborateontheoverallarchitectureofSCALE. ASTnodetypeparsedbytheTree-sitter. 
AsshowninFigure3,SCALEmainlyconsistsofthreemodules: commenttreeconstruction,structurednaturallanguagecomment treeconstruction,andSCT-enhancedrepresentation,withdetails Categories TargetNodes SymbolicTemplates asbelow. if([condition]) If 3.1 CommentTreeConstruction if-branch if([condition]) Thepurposeofthecommenttreeconstructionphaseistoenrich if-branch correspondingcodesnippetswithnaturallanguagedescriptions.It Selection If-else canrelievethedifficultiesinunderstandingthesemanticsofcode else[] statementswithmultipleoperatorsandpointers,andthushelpthe else-branch modelinferthecodelogicmoreeffectively.Inspiredbytheremark- switch([condition]) Switch ableachievementofLLMs,weseektoutilizetheirextensiveknowl- statement edgebasetobridgethereasoninggapinvulnerabilitydetection. Specifically,weleverageChatGPTtogeneratecommentsforeach while([expression]) While codesnippet.FollowingOpenAI’sgpt-best-practicesdocument[3], statement weemploytherole-basedbasicprompttoremindChatGPT[11] for([init-expression;condition-expression; ofitsjob(i.e.,generatecomments)foreachprovidedcodesnippet Iteration For loop-expression]) andusethe“gpt-3.5-turbo-0301"APIforthecommentgeneration. 
statement ToavoidLLMsfromgeneratingunnormalizedcode,wethennor- for([for-range-declaration:expression]) malizethecommentstofacilitatethesubsequentprogramanalysis Range-basedfor part.Specifically,weemploysomenormalizationoperationsinclud- statement ingremovingallblanklinesandreplacingmulti-linecomments Break [break]; enclosedintheformoftriplesinglequotationmarks(′′′)todouble Continue [continue]; slashes(//).SCALEthengeneratesAbstractSyntaxTrees(ASTs) Jump Return return[expression]; basedonsourcecodebyTree-sitter[5]libraryforconstructingthe commenttree.AgeneratedASTcanbeformulatedasadirected Goto goto[identifier]; graph(𝑁,𝐸),where𝑁 denotesthesetofnodesand𝐸represents case[constant-expression]: thesetofedges.Eachnode𝑛 ∈ 𝑁 withintheASTisarepresen- Labeled Case statement tationofapair (𝑉,𝑇),with𝑣 ∈𝑉 representingthenode’svalue and𝑡 ∈𝑇 representingitstype.Foraddingthecommentnode,we findthefirstnodeonthesubsequentrowanddesignatethisasthe targetnodeforthecomment.Thecommentnodeisthenintegrated andinwhatorderprogramsaremanipulatedandcreatestructured intotheASTbytakingtheparentofthetargetnodeasitsparent naturallanguagerulesforthem,includingselectionstatements, andpositioningitastheparentofthetargetnode.Forexample,as iterationstatements,jumpstatements,andlabeledstatements.The showninFigure4and5a,wegeneratethecommentforLine3and elevenstructurednaturallanguagerulesforthesefourcategories traversetheASTofthesourcecode,wherethefirstnodeinLine ofstatementsareshowninTable1.Astructurednaturallanguage 3isA“if_statement”.WeaddthecommentnodeBasaprevious rulerepresentsatypeofstatementandcorrespondingtargetnode siblingnodeofAandintegrateitintoAST.Thisapproachensures types. thatthecommentsarelogicallyandstructurallyalignedwiththe Selection Statements: The selection statements encompass followingstructurednaturallanguagerules. 
“if”, “if-else”, and “switch” statements, which provide a means to conditionally execute sections of code. To construct the structured [3.2 Structured Natural Language Comment Tree] natural language rules, SCALE starts with finding the target node
Construction (e.g.,𝑖𝑓 and𝑠𝑤𝑖𝑡𝑐ℎ)andcorrespondingcommentsdescription.Then Inthissection,wefirstpresentourstructurednaturallanguage ittraversesforwardonthetargetnode’schildandsiblingnodes rulesforstructurednaturallanguagecommentsynthesis.Then toconfirmtheconditionbranchingexpression.Finally,thesecom- wedescribeourproposedalgorithmforbuildingSCTs.TheSCT mentdescriptionsreplacethecontentbyutilizingcorresponding integratescodecommentsandcodesyntaxtemplates,andprovides structurednaturallanguagetemplates.FortheexampleinFigure5b, richcodeexecutioninformationforinferringvulnerabilitypatterns. SCALEidentifiesthetargetnodeA𝑖𝑓_𝑠𝑡𝑎𝑡𝑒𝑚𝑒𝑛𝑡 (highlightedin blue)andextractsthecorrespondingcommentnodeB(highlighted 3.2.1 StructuredNaturalLanguageRules. Tointegrateexecutionse- ingrey).Itthenreplacesthechildnode’sC(highlightedinred) quenceswiththenaturallanguageformoreeffectivelyidentifying valuewiththecommentnodeB(highlightedingrey). vulnerabilitypatterns,weintroduceasetofstructurednaturallan- IterationStatements:Theiterationstatementsenabletheex- guagerules.FollowingtheguidelinesofC++languagereference[1], ecutionofstatementszeroormoretimes,subjecttospecificter- weselectfourdistinctcategoriesofstatementsthatcontrolhow minationcriteria.WehopetoemploynaturallanguagerulestoPre-trained model Source code AST Source code Syntax Templates Source code + + + Comment Tree Symbolic Comment Multimodel Classification Tree Fusion Prompt Comment Com Trm eeent Symbolic rule Symboli Tc r C eeo mment (1) Comment Tree Construction (2) Symbolic Comment Tree Construction (3) SCT-Enhanced Comment SCALE:ConstructingStructuredNaturalLanguageCommentTreesforSoftwareVulnerabilityDetection ISSTA2024,16-20September,2024,Vienna,Austria ... 
Source Code AST Syntax Templates Comment Structured Natural Multimodel Classification Tree Language Comment Tree Fusion Prompt LLM Comment Structured Natural Language Rule (1) Comment Tree Construction (2) Structured Natural Language Comment Tree Construction (3) SCT-Enhanced Representation Figure3:TheoverviewofSCALE. 1 int ff_alloc_entries(AVCodecContext *avctx, int count) {... translation_unit 2 if (avctx->active_thread_type & FF_THREAD_SLICE) { 3 4- + i if f ( (! Cp h- e> ce kn itr fi e as ll) o c{ ation was successful) { FunctC ioo nm dm efe inn it t ion ... function_definition 5- return AVERROR(ENOMEM); } primitive_type function_declarator compound_statement 6+ return Return an error code if allocation failed;} 7 // Assign count to entries_count of p 8 p ...->entries_count = count; { CheckC io f m alm loe can tt i on ..B . if_statementA } 19 0- + f fo or r ( ([ [i L o= o p0 ; t hi r o< u gp h- > tt hh rr ee aa dd __ cc oo uu nn tt ; n ui m+ b+ e] r) o{ f t i m e s ]) { if parenth (e !ps -i >ze end t_ re iex sp )ression compound_statement 11 pthread_mutex_init(&p->progress_mutex[i], NULL); 1 12 3 return 0; } ...}} node type ( unary_expression ) node value field_expression { RetuC ro nm anm ee rn rt o r ... return_statement Figure4:Anexampleofstructurednaturallanguagecom- identifier -> field_identifier return call_expression ; AVERROR(ENOMEM); mentinSCALE.Theredandgreylinesdenotetheoriginal (a)Anexampleofthecommenttree. sourcecodeandstructurednaturallanguagecomment,re- spectively. translation_unit FunctC ioo nm dm efe inn it t ion ... function_definition primitive_type function_declarator compound_statement facilitatethecomprehensionoflooplogicandtriggeringconditions bymodels. { CheckC io f m alm loe can tt i on ..B . 
if_statementA } Wedesignthethreestructurednaturallanguagerulesinthe AST node if parenthesized_expression compound_statement (!p->entries) C secondpartofTable1.Forthe“while”statement,SCALEstarts Comment node withthetargetnodetypeof𝑤ℎ𝑖𝑙𝑒.Thenittraversesforwardon Target node ( unary_expression ) thetargetnode’schildorsiblingnodestosubstitutetheexpres- Replacement node field_expression { RetuC ro nm anm ee rn rt o r ..D . return_statemE ent sionwiththecorrespondingcomment.Theprocessofconstructing structurednaturallanguagerulesforthe“for”andthe“range-based- Deleted node identifier -> field_identifier return Ac Va El Rl R_ Oe Rx (p Er Ne Os Msi Eo Mn );F ; for”statementscloselymirrorsthatofthe“while”statement.For (b)AnexampleoftheSCT. the“for”statement,thereplacementencompassesinit-expression, condition-expression,andloop-expression,whereininit-expression Figure5:TheexampleofthecommenttreeandSCT.The and loop-expression can contain multiple separated statements. blackandredfontdenotesthenodetypeandvalue,respec- AsshowninFigure4,Line9(highlightedingrey)indicatesthe tively.Theyellow,green,blue,red,andgray-shadednodesde- sourcecodeofthe“for”statement,andLine10(highlightedinred) notetheoriginalnodes,commentnodes,targetnodes,nodes indicatesthegeneratedstructurednaturallanguagecomments. toreplace,anddeletednodes,respectively. JumpStatements: Thejumpstatementsperformanimme- diatelocaltransferofcontrol.Thesestatementssharecommon semantics,suchasthe“break”statement,therebyalteringthepro- gram’sexecutionflow.Despitesharingthesametokens,different intermediariesbetweenthejumpstatementandtheintendedin- “return” statements possess different implicit targets, often ren- ferencetarget,aidinginindicatingthecodeexecutionsequence deringthemchallengingtodiscern.Consequently,thestructured Weintroducefourcategoriesandthestructurednaturallanguage
naturallanguagerulesforjumpstatementscanbeconstructedas templatescanbefoundinthethirdsectionofTable1.ForthefirstISSTA2024,16-20September,2024,Vienna,Austria TrovatoandTobin,etal. Algorithm1:StructuredNaturalLanguageCommentTree identifiesandrecordseachvisitednodeincludingstatement,ex- Construction pression, and identifier nodes. When SCALE visits a node that Input :TheCommentTreeofgivenfunction:𝑓𝑢𝑛𝑐_𝑐𝑡 conforms to a structured natural language rule, it captures the Output :StructuredNaturalLanguageCommentTreeofthegiven node’svaluesandincorporatestherelevantcommentfollowing function,𝑓𝑢𝑛𝑐_𝑠𝑐𝑡 Initialize:Initializeanactionlist𝑎𝑐_𝑙𝑖𝑠𝑡 thestructurednaturallanguagecommentsrules.Accordingtothe 1 FunctionStructuredNaturalLanguageCommentTreeConstruction: parsingrules[5],itinvolvestwoconditions:(1)Ifthetargetnode // Using structured natural language rules to construct occursinthechildnode,SCALEwilltraverseallchildnodestoadd structured natural language comment tree 2 if𝑛𝑜𝑑𝑒.type∈𝑎𝑐_𝑙𝑖𝑠𝑡and𝑛𝑜𝑑𝑒.commentexistthen structurednaturallanguagecomments(Lines4-12).(2)Ifthetarget 3 if𝑛𝑜𝑑𝑒.type∈Table1and𝑛𝑜𝑑𝑒.parent.type==“comment”then nodeoccursinthesiblingnode,SCALEwillcheckitsparentnode // Processing for child nodes andonlyconfirmthenextsiblingnode(Lines13-19).Nodevalues 4 5 fora il fl𝑐 𝑐ℎ ℎ𝑖 𝑖𝑙 𝑙𝑑 𝑑_ _𝑛 𝑛𝑜 𝑜𝑑 𝑑𝑒 𝑒.t∈ yp𝑛 e𝑜 =𝑑 =𝑒. “c ph ail rd er ne tn hei ss in zeo dt _v ei xsi pt re ed ssd ioo n”then meetingtheseconditionsarethenupdated.Thisprocesswillbe 6 st_comment=𝑛𝑜𝑑𝑒.comment completeduntilthecommenttreeistraversedandreturntheSCT. 7 delete𝑐ℎ𝑖𝑙𝑑_𝑛𝑜𝑑𝑒.value Finally,SCALEflattensthecurrentSCTandgeneratesthestruc- 8 𝑐ℎ𝑖𝑙𝑑_𝑛𝑜𝑑𝑒.value=st_comment turednaturallanguagecommentthroughtheASTflattenopera- 9 update(𝑓𝑢𝑛𝑐_𝑐𝑡) tor[5].Figure4showsanexampleoftheflattenedSCTultimately 10 break 11 end obtainedformodeltraining. 
12 end // Processing for next sibling node 3.3 SCT-EnhancedRepresentation 13 ifnext𝑠𝑖𝑏𝑖𝑛𝑔_𝑛𝑜𝑑𝑒existsand𝑠𝑖𝑏𝑖𝑛𝑔_𝑛𝑜𝑑𝑒.type== TheSCT-enhancedrepresentationmoduleaimstoincorporatethe “parenthesized_expression”or“compound_expression”then 14 st_comment=𝑛𝑜𝑑𝑒.comment constructedSCTsforwell-capturingvulnerabilitypatterns.Wefirst 15 delete𝑐ℎ𝑖𝑙𝑑_𝑛𝑜𝑑𝑒.value utilizeUniXcoderastheencoderforsourcecodeandstructured 16 𝑐ℎ𝑖𝑙𝑑_𝑛𝑜𝑑𝑒.value=st_comment naturallanguagecomments,asUniXcoder[25]isaunifiedcross- 17 update(𝑓𝑢𝑛𝑐_𝑐𝑡) modal(i.e.,code,comment,andAST)pre-trainedmodel.SCALE 18 break 19 end feeds the source code and SCT into the pre-trained model and 20 end obtainstheirrepresentationsℎ 𝑐 ∈ R𝑙×𝑛 andℎ 𝑐𝑡 ∈ R𝑙×𝑛 ,where𝑙 21 end and𝑛denotethelengthofinputandthedimensionofembedding, 22 𝑓𝑢𝑛𝑐_𝑠𝑐𝑡=𝑓𝑢𝑛_𝑐𝑡 23 return𝑓𝑢𝑛𝑐_𝑠𝑐𝑡 respectively.Wethenproposetofusetherepresentationofsource code and structured natural language comment through Cross- Attention[12].Specifically,SCALEfirstmapstherepresentations twostructurednaturallanguagerulesofthejumpstatements,we into: findthetargetnodesof𝑏𝑟𝑒𝑎𝑘and𝑐𝑜𝑛𝑡𝑖𝑛𝑢𝑒.Thenwecombinethe 𝑄 =𝑊 𝑄(𝑖) ·𝜑(ℎ 𝑐𝑡),𝐾 =𝑊 𝐾(𝑖) ·𝜑(ℎ 𝑐),𝑉 =𝑊 𝑉(𝑖) ·𝜑(ℎ 𝑐) (1) initialnodevaluewiththecorrespondingcommentstoproducethe nodevalue.Forthe𝑟𝑒𝑡𝑢𝑟𝑛and𝑔𝑜𝑡𝑜 targetnodes,wecollectthe where𝑊(𝑖) ,𝑊(𝑖) and𝑊(𝑖) arethelinearprojectionofthe𝑖-th 𝑄 𝐾 𝑉 childnodevalueof𝑖𝑑𝑒𝑛𝑡𝑖𝑓𝑖𝑒𝑟 and𝑒𝑥𝑝𝑟𝑒𝑠𝑠𝑖𝑜𝑛,respectively,where head.𝜑(ℎ 𝑐𝑡)and𝜑(ℎ 𝑐)aretheintermediatelayer’srepresentation weusethecommentstoreplacethechildnodevalues. 
ofsourcecodeandSCT,respectively.SCALEintegratestheinfor- InFigure5b,nodesDtoFillustrateaninstanceofa“return” mationfromsourcecodeandSCT,thenobtainstheSCT-enhanced statement.SCALEidentifiesthetargetnodeE(highlightedinblue) representationasfollows: andsubstitutesthevalueofnodeF(highlightedinred)withthat (cid:18) 𝑄𝐾𝑇(cid:19) ofnodeD(highlightedingrey).Thisprocessenrichesthe“return” 𝐴𝑡𝑡𝑒𝑛𝑡𝑖𝑜𝑛(𝑄,𝐾,𝑉)=𝑠𝑜𝑓𝑡𝑚𝑎𝑥 ||𝐻 √ ·𝑉 (2) statementwithrelevantin-contextinformation,therebyenhancing 1 𝑑 themodel’scomprehensionofthecodeexecutionsequence.The where𝑇 denotes the transpose operator and𝑑 denotes the em- generatedstructurednaturallanguagecommentisexemplifiedin beddingsize.||and𝐻 aretheconcentrateoperatorandthehead lines5-6ofFigure4. number,respectively. LabeledStatements:Thelabeledstatementsfacilitatethedirect Finally,SCALEusesthemeanoperatorandleverageaclassifier transferofprogramcontroltoapredefinedstatement.Toexplain forsoftwarevulnerabilitydetection,whichcanbeformulatedas the“case”statement,wealsoformulateastructurednaturallan- follows: guagerule.Wecaptureboththetargettypenode𝑐𝑎𝑠𝑒anditsnext 𝑀 =𝜎(𝐶𝑙𝑎𝑠𝑠𝑖𝑓𝑖𝑒𝑟(𝑀𝑒𝑎𝑛(𝐴𝑡𝑡𝑒𝑛𝑡𝑖𝑜𝑛(𝑄,𝐾,𝑉)))) (3) sibling node from the comment tree. Subsequently, we employ commentstosubstitutethevalueofthe𝑐𝑜𝑛𝑠𝑡𝑎𝑛𝑡 −𝑒𝑥𝑝𝑟𝑒𝑠𝑠𝑖𝑜𝑛’s where𝜎denotesthesigmoidfunction.AndSCALEtrainstheclas- node. sifier[27]byminimizingtheCross-Entropy[15]loss. 3.2.2 SCTConstructionAlgorithm. Basedontheabovestructured 4 EXPERIMENTALSETUP naturallanguagerules,wethenintroducetheconstructionprocess 4.1 ResearchQuestions of SCTs. The overall SCT construction process is illustrated in Algorithm1,withanexampleshownFigure5b. 
In this section, we evaluate the effectiveness of SCALE by comparing it with the state-of-the-art vulnerability detection methods and focus on the following five Research Questions (RQs): [running header: SCALE: Constructing Structured Natural Language Comment Trees for Software Vulnerability Detection — ISSTA 2024, 16-20 September, 2024, Vienna, Austria] Table 2: Statistics of the datasets. the prompts used in [21] to improve their performance on vulnerability detection. Dataset | #Total | #Vul. | #Non-vul. | Ratio (%) 4.4 Evaluation Metrics
RQ1: How effective is SCALE in vulnerability detection compared with existing approaches? RQ2: How effective is SCALE when applied to other pre-trained models? RQ3: How do different structured natural language comment rules contribute to the performance of SCALE? RQ4: What is the influence of different modules on the detection performance of SCALE? RQ5: What is the influence of hyper-parameters on the performance of SCALE? [Table 2 rows:] FFMPeg+Qemu [65] | 22,361 | 10,067 | 12,294 | 45.02; Reveal [10] | 18,169 | 1,664 | 16,505 | 9.16; SVulD [43] | 28,730 | 5,260 | 23,470 | 18.31. Following the previous work [18, 20, 65], we choose the following four metrics to evaluate SCALE's performance: • Accuracy (Acc): $Acc = \frac{TP+TN}{TP+TN+FP+FN}$. Acc measures the percentage of correctly classified samples out of all samples. $TN$ represents the number of true negatives and $TP+TN+FP+FN$ represents the number of all samples. • Precision (Pre): $Pre = \frac{TP}{TP+FP}$. Pre is the percentage of true vulnerabilities out of all the vulnerabilities that are retrieved. $TP$ and $FP$ denote the number of true positives and false positives, respectively. • Recall (Rec): $Rec = \frac{TP}{TP+FN}$. Rec is the percentage of vulnerabilities that are detected out of all vulnerable code snippets. $TP$ and $FN$ denote the number of true positives and false negatives, respectively. • F1 score (F1): $F1 = 2 \times \frac{Pre \times Rec}{Pre + Rec}$. F1 measures the harmonic mean of Pre and Rec metrics. 4.5 Implementation Details
It comprises 22k code snippets, of andhyper-parametersreleasedbytheauthors.ForDevign,follow- which10khavebeenidentifiedasvulnerable.TheRevealdataset ingthepreviouswork[57,58],wetryourbesttoreproduceitbased has over 18kcode snippets,with around 2kof these exhibiting oncodereproducedbyotherresearchers[10].ForLLM-basedmeth- vulnerabilities.SVulD[43]isbasedonFanetal.[17]andcontains ods,wedownloadtheCode-llama-7bfromHuggingFace[29]and bothbefore-fixedandafter-fixedcodeinthetrainingset.Themodel evaluatethemonourserver.ForChatGPTandGPT-3.5-instruct, isrequiredtoidentifythebefore-fixedasvulnerableandafter-fixed weusetheOpenAI’spublicAPI“gpt-3.5-turbo-0301"and“gpt-3.5- asnon-vulnerablesimultaneouslyonthisdataset.Table2presents turbo-instruct"forexperiments.Andweusetheinitialparameter thestatisticsoftheexperimentaldatasets. providedbyOPENAI. Followingthepreviouswork[41,43,59,65],wesplitthedatasets Toensurethefairnessoftheexperiments,weusethesamedata intodisjointtraining,validation,andtestsetsinaratioof8:1:1. splittingforalltheapproachesinallresearchquestions.Weusethe Weusethetrainingsettotrainthebaselinemodels,usethevali- “gpt-3.5-turbo-0301"APIforthecommentgenerationandfine-tune dationsetforselectingbest-performancemodels,andevaluatethe thepre-trainedmodelUniXcoderwithalearningrateof2𝑒−5.The performanceofSCALEandotherbaselinesinthetestset. crossattentionheadnumber𝐻issetas8andthebatchsizeissetto 32.TheinfluenceoftheseparametersisdiscussedinSection5.5.All 4.3 Baselines experimentsareconductedonaserverwithNVIDIAA100-SXM4- ToverifytheeffectivenessofSCALE,wechoosethefollowingthree 40GBGPUs. 
typesofvulnerabilitydetectionapproachesasourbaselines: 5 EXPERIMENTALRESULTS • Supervised-basedmethods:WeuseDevign[65]andRe- veal[10],whicharewidelyadoptedasbaselinesinrecent 5.1 RQ1:EffectivenessofSCALE works[9,37,59].Theyconstructthejointgraphfromthe ToanswerRQ1,wecomparetheSCALEwiththreetypesofvulner- source code and use the GGNN model for function-level abilitydetectionbaselines.TheresultsareshowninTable3. vulnerabilitydetection. • PretrainedModel-basedmethods:Weselectthreepop- 5.1.1 ComparisonwithSupervisedandPre-trainedModel-based ular pre-trained code models, including CodeBERT [18], Methods. Specifically,wecompareSCALEwithtwosupervised- CodeT5[55],andUniXcoder[25]todetectvulnerabilities. based approaches (i.e., Devign and Reveal) and six pre-trained Besides,wealsoselectrecentstate-of-the-artmethodsthat model-basedapproaches(i.e,CodeBERT,CodeT5,UniXcoder,EPVD, furtherfinetunethesepre-trainedmodelsforvulnerability LineVul,andSVulD).FromtheresultsinTable3,weobservethat detection,includingEPVD[63],LineVul[20]andSVulD[43]. SCALEoutperformsallthesupervisedbaselinemethodsonthe • LLM-basedmethods:WealsochooseCodeLlama-7b[47], threedatasetsintermsofF1score,by2.96%forFFMPeg+Qemu, ChatGPT[11]andGPT3.5-instruct[24]toevaluatetheper- 13.47%forRevealand1.17%forSvulDdataset,respectively.When formance of LLMs for vulnerability detection. We follow consideringallthefourperformancemetricsinthethreedatasetsISSTA2024,16-20September,2024,Vienna,Austria TrovatoandTobin,etal. Table3:EvaluationresultsofSCALEcomparedwithvulnerabilitydetectionbaselinesonthethreedatasets.Theshadedcells
representtheperformanceofthebestmethodsineachmetric.Boldtextcellsrepresentthebestperformance.Theresultsof statisticalsignificancetestsarelistedinhttps://github.com/Xin-Cheng-Wen/Comment4Vul. Dataset FFMPeg+Qemu Reveal SVulD Metrics Acc Pre Rec F1 Acc Pre Rec F1 Acc Pre Rec F1 Devign 56.89 52.50 64.67 57.95 87.49 31.55 36.65 33.91 73.57 9.72 50.31 16.29 Reveal 61.07 55.50 70.70 62.19 81.77 31.55 61.14 41.62 82.58 12.92 40.08 19.31 CodeBERT 62.37 61.55 48.21 54.07 87.51 43.63 56.15 49.10 80.56 14.33 55.32 22.76 CodeT5 63.36 58.65 68.61 63.24 89.53 51.15 54.51 52.78 78.73 14.32 62.36 23.30 UnixCoder 65.19 59.93 59.98 59.96 88.48 47.44 68.44 56.04 77.54 15.11 72.24 24.99 EPVD 63.03 59.32 62.15 60.70 88.87 48.60 63.93 55.22 76.75 14.26 69.58 23.67 LineVul 62.37 61.55 48.21 54.07 87.51 43.63 56.15 49.10 80.57 15.95 64.45 25.58 SVulD - - - - - - - - 86.99 21.46 56.84 31.16 Codellama-7B 53.44 46.20 6.37 11.20 83.49 8.23 5.44 6.55 88.40 3.84 5.65 4.57 ChatGPT 53.85 49.72 14.35 22.28 81.77 9.52 8.37 8.91 88.79 14.38 25.81 18.47 GPT-3.5-Instruct 51.31 47.03 42.06 44.41 60.47 9.60 32.22 14.79 65.81 7.26 50.4 12.69 SCALE 66.18 61.88 68.69 65.11 90.02 52.32 78.69 62.85 87.63 22.56 57.03 32.33 (12combinationcasesaltogether),SCALEachievesthebestper- Answer to RQ1: SCALE achieves the best performance in formancein10outofthe12cases.Theseresultsdemonstratethat precision and F1 score compared with previous approaches. SCALEcapturesthevulnerabilitypatternsofcodemoreprecisely Specifically,SCALEoutperformsthebest-performingbaseline thanthesupervisedandpre-trainedmodel-basedbaselines.Wealso by2.96%,13.47%,and1.17%intermsofF1scoreonthethree noticethatthepre-trainedmodel-basedmethodsperformbetter datasets,respectively. thansupervised-basedmethods.Itcanbeattributedthatpre-trained model-basedmethodshaveleveragedgeneralcode-relatedknowl- edgeinthepre-trainingstage,whichenhancestheabilitytocapture 5.2 RQ2:EffectivenessonOtherPre-trained vulnerabilitypatterns. 
Models Moreover,experimentalresultsrevealthatthedetectionaccuracy Inthisresearchquestion,wewonderwhetherotherpre-trained ontheSVulDdatasetislowercomparedtotheotherdatasets.Itcan modelscanhavethesameeffectivenesswhenequippedwithour beattributedtothechallengethatcurrentvulnerabilitydetection proposedSCT.Toanswerthisquestion,wereplaceUniXcoderwith approachesstruggletodifferentiatebetweenbefore-andafter-fixed differentpre-trainedmodel-basedmethodsincludingCodeBERT codewhichoftenexhibitsonlysmallvariances.Despitethesechal- andEPVDtovalidatetheperformance.Theexperimentalresults lenges,SCALEdemonstratesimprovementintheF1scoreof1.17%, arepresentedinTable4. surpassingtheSVulDbaselinewhichisspecificallydesignedfor Theexperimentalresultsshowthatthestructurednaturallan- thissituation. guage comment is effective for fine-tuning existing pre-trained modelsinmostcases.Wefindaverageimprovementsondifferent 5.1.2 ComparisonwithLLM-basedMethods. WealsocompareSCALE datasetsby6.67%inCodeBERT,1.82%inEPVDand4.88%inUniX- withmultipleLLM-basedapproaches,includingCodeLlama-7b, coder,respectively,intermsofF1score.Inparticular,incorporating ChatGPTandGPT-3.5-instruct.Ingeneral,wecanfindthatthe structurednaturallanguagecommentenablesCodeBERTtosur- LLM-basedapproachesperformworsethanSCALEandsupervised passtheperformanceofthemajorityofexistingbaselinesonthe approachesmentionedbefore.Specifically,SCALEoutperformsthe FFMPeg+Qemudataset.ThisindicatesthatSCTprovidestheextra bestLLM’sbaselinesby9.63%,170.38%,83.83%,and150.53%interms explanationinstructurednaturallanguagewaysforintricatelogic ofaccuracy,precision,recall,andF1scorerespectively.Itindicates andleveragesthestructurednaturallanguagerulesforinvolving thatdespitetheextensivemodelsizeandtrainingdata,LLM-based thecodeexecutionsequence,whichcanbeeasilylearnedbyexist- approachesstillfacechallengesinvulnerabilitydetectionwithout ingpre-trainedmodel-basedmethods.AsforEPVD,althoughithas fine-tuningduetothelackofdomainknowledge. 
usedmultiplesyntaxcontrolflowpathstocapturevulnerabilitypat- terns,thestructurednaturallanguagecommentstillimprovesitby alleviatingtheheavyuseofoperatorsandpointers.ItimprovestheSCALE:ConstructingStructuredNaturalLanguageCommentTreesforSoftwareVulnerabilityDetection ISSTA2024,16-20September,2024,Vienna,Austria Table4:TheexperimentalresultsofapplyingSCTtotheexistingpre-trainedmodel-basedmethods. Dataset FFMPeg+Qemu Reveal SVulD Metrics Acc Pre Rec F1 Acc Pre Rec F1 Acc Pre Rec F1 64.24 59.07 72.11 64.94 88.78 48.30 67.62 56.41 80.06 15.28 62.74 24.58 CodeBERT#
(↑+1.87%) (↓-2.48%) (↑+23.90%) (↑+10.87%) (↑+1.27%) (↑+4.67%) (↑+11.47%) (↑+7.31%) (↓-0.50%) (↑+0.95%) (↑+7.42%) (↑+1.82%) 61.60 56.40 72.35 63.39 89.49 63.93 50.81 56.62 76.83 74.71 15.04 25.04 EPVD# (↓-1.43%) (↓-2.92%) (↑+10.20%) (↑+2.69%) (↑+0.62%) (↑+15.33%) (↓-13.12%) (↑+1.40%) (↑+0.08%) (↑+60.45%) (↓-54.54%) (↑+1.37%) 66.18 61.88 68.69 65.11 89.58 50.86 84.84 63.59 82.33 17.14 62.93 26.94 UniXcoder# (↑+0.99%) (↑+1.95%) (↑+8.71%) (↑+5.15%) (↑+1.10%) (↑+3.42%) (↑+16.40%) (↑+7.55%) (↑+4.79%) (↑+2.03%) (↓-9.31%) (↑+1.95%) F1scoreperformanceof2.69%,1.40%,and1.37%inthreedatasets, structurednaturallanguagerulesperformsbetterthanusing respectively. anysinglerule. AnswertoRQ2:SCTiseffectivefordifferentpre-trained models,whichaveragelyimprovestheF1scoreby6.67%in 5.4 RQ4:EffectivenessofDifferentModulesin CodeBERT,1.82%inEPVD,and4.88%inUniXcoder,respec- SCALE tively. Inthissection,weexploretheimpactofdifferentmodulesofSCALE includingStructuredNaturalLanguageCommentTreeConstruc- tion(SCTC)andSCT-EnhancedRepresentation(SER)module.The 5.3 RQ3:EffectivenessofDifferentStructured resultsareshowninTable6. NaturalLanguageRulesinSCALE Toinvestigatetheeffectivenessofeachcategoryofrule,wecon- 5.4.1 StructuredNaturalLanguageCommentTreeConstruction. To structfourvariantsofSCALEwithfourcategoriesofstructured exploretheeffectoftheSCTCmodule,wedeployonevariant(i.e., naturallanguagerules,includingselection,iteration,jump,and WithoutSCTC)byonlyusingthecommenttreeforSCT-enhanced labeledstatements.Allvariantsonlyusespecificstructurednatural representation.AsshowninTable6,theSCTCcanimprovethe languagerulestoconstructtheSCT. 
performanceofSCALEonalldatasets.Specifically,removingstruc- TheperformanceoffourvariantsandSCALEispresentedin turednaturallanguagecommentsleadstotheF1scoredropof1.29%, Table5.Theexperimentalresultsindicatethatthefourseparate 2.59%,and2.04%ondifferentdatasets.Especiallyontheimbalanced structurednaturallanguagerulesperformworsecomparedwith dataset(i.e.,Reveal),SCTCbooststheperformancemorethanon SCALEinmostcases.Specifically,weobservethatthevariants thebalanceddatasets(i.e.,DevignandSVulD),whichenhancesthe decreaseby0.63%∼1.06%inFFMPeg+Qemuand0.44%∼0.66%in fourparametricmetricsinRevealby0.75%foraccuracy,2.32%for Reveal,1.41%∼6.21%inSVulD,respectively,intermsofaccuracy. precision,2.87%forrecall,and2.59%forF1score,respectively.It Comparedwithotherstatements,theiterationstatementsandcor- indicatesthatstructurednaturallanguagecommentgenerationhas respondingstructurednaturallanguagerulesgenerallycontribute agreatereffectontheunbalanceddataset. more,whichoutperformsothervariantsin6outof12cases.We suppose that it is mainly caused by the structured natural lan- 5.4.2 SCT-EnhancedRepresentation. Tounderstandtheimpactof guagerulesofiterationstatementsreplacingmorecontents(i.e., theSERmodule,wealsodeployavariant(i.e.,WithoutSER)of init-expression,condition-expression,andloop-expression)andthe SCALEwithouttheSERphase.Inthisvariant,weonlyusestruc- constructedstructurednaturallanguagecommentscanexplainthe turednaturallanguagecommentsasinputwithoutthesourcecode. codemoreprecisely. Theperformancedegradationobservedacrossthreedatasetscon- Besides,wefindthatleveragingtheintegrationofallstructured sistentlydemonstrateanaveragedecreaseof3.42%inaccuracy naturallanguagerulesperformsbetterthanonlyusingasinglerule, and2.83%inF1score,respectively.TheseresultsindicatetheSCT- whichimprovestheF1scoreperformanceinthreedatasetsby0.77%, enhancedrepresentationcanbettercapturevulnerabilitypatterns. 
1.35%,and1.63%,respectively.Thisindicatesthattheintegrationof allrulescanfurtherhelpthemodelunderstandthecode’sexecution sequencetoimprovetheperformanceofvulnerabilitydetection. AnswertoRQ4:BoththeSCTCandSERmodulesenhance theperformanceofSCALE.TheSCGphaseimprovesF1score AnswertoRQ3:Allstructurednaturallanguagerulescon- performanceonthreedatasetsby1.29%,2.59%,and2.04%,re- tribute to the performance of SCALE, with an F1 score im- spectively.TheSERmoduleimprovesSCALEby1.31%,3.61% provementof1.41%∼6.21%.Leveragingtheintegrationofall and3.56%,respectively.ISSTA2024,16-20September,2024,Vienna,Austria TrovatoandTobin,etal. Table5:TheperformanceofSCALEwithdifferentstructurednaturallanguagerulesonthreedifferentdatasets. Dataset FFMPeg+Qemu Reveal SVulD Metrics Acc Pre Rec F1 Acc Pre Rec F1 Acc Pre Rec F1 Selection 65.55 61.75 65.74 63.68 89.14 49.64 84.02 62.40 80.88 16.22 64.64 25.93
Iteration 65.12 60.23 70.84 65.10 89.18 49.76 84.01 62.50 80.92 16.35 65.21 26.14 Jump 65.45 61.21 67.65 64.27 88.92 49.04 83.61 61.82 80.75 16.22 65.21 25.98 Labeled 65.37 61.06 67.97 64.33 89.23 49.88 82.79 62.25 76.12 13.91 69.58 23.19 SCALE 66.18 61.88 68.69 65.11 89.58 50.86 84.84 63.59 82.33 17.14 62.93 26.94 Table6:TheperformanceofSCALEinthreedifferentdatasetswhenremovingthestructurednaturallanguagecommenttree constructionmodule(i.e.,WithoutSCTC)andremovingtheSCT-enhancedrepresentationmodule(i.e.,WithoutSER)infour metrics. Dataset FFMPeg+Qemu Reveal SVulD Metrics Acc Pre Rec F1 Acc Pre Rec F1 Acc Pre Rec F1 65.59 61.73 66.06 63.82 89.27 50.00 75.82 60.26 78.32 15.17 69.69 24.99 WithoutSCTC (↓0.59%) (↓0.15%) (↓2.63%) (↓1.29%) (↓0.75%) (↓2.32%) (↓2.87%) (↓2.59%) (↓4.01%) (↓1.97%) (↑6.76%) (↓2.04%) 64.28 59.68 68.53 63.80 88.96 49.02 71.72 59.24 75.02 13.90 73.57 23.38 WithoutSER (↓1.90%) (↓2.20%) (↓0.16%) (↓1.31%) (↓1.06%) (↓3.30%) (↓6.97%) (↓3.61%) (↓7.31%) (↓3.24%) (↑10.64%) (↓3.56%) SCALE 66.18 61.88 68.69 65.11 90.02 52.32 78.69 62.85 82.33 17.14 62.93 26.94 5.5 RQ5:InfluencesofHyper-paramaterson 70 70 SCALE 65 65 Inthissection,westudytheeffectoftwohyper-parameters(i.e., batchsizesandheadnumbers)ofSCALE.Weassigndifferentvalues 60 60 tothemandexaminetheirimpactontheperformanceofvulner- 55 55 abilitydetection.Duetothepagelimitation,weonlypresentthe 4 8 16 32 64 2 4 8 16 32 experimentalresultsofFFMPeg+QemuinFigure6..Experimental resultsforotherdatasetsareshownintherepository. (a)Batchsize (b)Headnumber 5.5.1 Batch Size. Figure 6a shows the performance of SCALE Figure6:Theimpactofbatchsizeandheadnumberonthe acrossfourmetricswithdifferentbatchsizesinFFMPeg+Qemu. modelperformanceintheFFMPeg+Qemudataset.Theblue, Specifically,theperformanceofSCALEincreaseswiththegrowth red,orange,andblacklinesdenotetheaccuracy,precision, ofbatchsizeingeneral,whichdemonstratestheimportanceof recallandF1scoremetrics,respectively. 
batchsizeinimprovingmodelgeneralization.Forbatchsizesex- ceeding 32, we observe that the SCALE’s performance exhibits relativestability.Additionally,weobservethatbatchsizeseriously influencesimbalanceddatasets(i.e.,Reveal),leadingtoperformance fluctuationby49.43%∼53.60%inprecisionand61.07%∼88.52%in recallmetrics.Conversely,thegrowthofperformanceinbalanced alldatasets.Therefore,weempiricallyuseeightheadnumbersfor datasets(i.e.,FFMPeg+QemuandSVulD)ismoresteadysincethe allthreedatasetsandachieve65.11%inFFMPeg+Qemu,63.59%in similar ratio between negative and positive samples makes the Reveal,and26.94%inSVulD,respectively,intermsoftheF1score. modellearningmorestable. 5.5.2 HeadNumber. Toevaluatetheimpactsoftheheadnumbers ofSCALE,wevaryitfrom2to32andpresenttheperformanceon Answer to RQ5: The hyper-parameter settings can impact fourmetricsinFigure6b.Ascanbeseen,SCALEachievesthebest theperformanceofSCALEintheFFMPeg+Qemu,Reveal,and accuracyperformancewhenthenumberofattentionheadsisset SVUlDdatasets.SCALEachievesthebestperformancewiththe aseight.Theperformanceofdifferentheadnumbersissimilarfor batchsizeof32andheadnumberof8.SCALE:ConstructingStructuredNaturalLanguageCommentTreesforSoftwareVulnerabilityDetection ISSTA2024,16-20September,2024,Vienna,Austria Table7:TheperformanceofSCALEinFFMPeg+Qemu[65] 7 CONCLUSION datasetwhenusingGPT-3.5-Instructincommenttreecon- Inthispaper,weproposeSCALE,astructurednaturallanguage structionphase. 
comment tree-based vulnerability detection framework based on the pre-trained models. It mainly comprises a comment tree construction for enhancing the model's ability to infer the semantics of code statements, a structured natural language comment tree construction module for explicitly involving code execution sequence, and an SCT-enhanced representation for well-capturing vulnerability patterns. Compared with the state-of-the-art methods, the experimental results on three popular datasets validate the effectiveness of SCALE for vulnerability detection.

[Table 7]
Metrics                          Acc    Pre    Rec    F1
Comment with GPT-3.5-Instruct    63.69  58.23  74.10  65.22
SCALE (Comment with ChatGPT)     66.18  61.88  68.69  65.11

6 DISCUSSION
6.1 Influence of LLM's Generated Comment
To evaluate the efficacy of the SCT generated by SCALE, we further employ GPT-3.5-Instruct to construct SCT. Due to constraints in resources, the experiments are limited to the FFMPeg+Qemu

DATA AVAILABILITY
Our source code and experimental data are available at: https://github.com/Xin-Cheng-Wen/Comment4Vul.

REFERENCES
[1] 2023. C++ Language Reference. https://learn.microsoft.com/en-us/cpp/cpp/cpp-