diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..422fc9ce0b1e1156f5a16aade6c8af3ec8db170c
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/__init__.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/agent.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/agent.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8727e7c11c798d48d0433b17dd70fb25b0792e30
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/agent.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/head.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/head.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2a08b2785637e3663ff2d56362a8f0fcb7bab3ed
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/head.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/optional_deps.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/optional_deps.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ea64c78e6b94b1f3ab891185deae59a7699b3c42
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/optional_deps.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/state_aggregator.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/state_aggregator.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..93de5a4f4058b4cff69abc86d91dcdf5222bca62
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/state_aggregator.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/utils.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/utils.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fbd5215790fb160e49f97ca76824d35d217f412f
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/__pycache__/utils.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/asset-manifest.json b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/asset-manifest.json
new file mode 100644
index 0000000000000000000000000000000000000000..781a3cf5f6fa8137fad5a48c420a9c3552ddfe33
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/asset-manifest.json
@@ -0,0 +1,42 @@
+{
+  "files": {
+    "main.css": "./static/css/main.388a904b.css",
+    "main.js": "./static/js/main.378317da.js",
+    "static/js/495.01ff0983.chunk.js": "./static/js/495.01ff0983.chunk.js",
+    "static/js/591.222f4f03.chunk.js": "./static/js/591.222f4f03.chunk.js",
+    "static/media/roboto-latin-900italic.woff": "./static/media/roboto-latin-900italic.c20d916c1a1b094c1cec.woff",
+    "static/media/roboto-latin-300italic.woff": "./static/media/roboto-latin-300italic.bd5b7a13f2c52b531a2a.woff",
+    "static/media/roboto-latin-500italic.woff": "./static/media/roboto-latin-500italic.18d00f739ff1e1c52db1.woff",
+    "static/media/roboto-latin-400italic.woff": "./static/media/roboto-latin-400italic.b1d9d9904bfca8802a63.woff",
+    "static/media/roboto-latin-100italic.woff": "./static/media/roboto-latin-100italic.451d4e559d6f57cdf6a1.woff",
+    "static/media/roboto-latin-700italic.woff": "./static/media/roboto-latin-700italic.9360531f9bb817f917f0.woff",
+    "static/media/roboto-latin-500.woff": "./static/media/roboto-latin-500.cea99d3e3e13a3a599a0.woff",
+    "static/media/roboto-latin-900.woff": "./static/media/roboto-latin-900.bac8362e7a6ea60b6983.woff",
+    "static/media/roboto-latin-100.woff": "./static/media/roboto-latin-100.a45108d3b34af91f9113.woff",
+    "static/media/roboto-latin-700.woff": "./static/media/roboto-latin-700.2267169ee7270a22a963.woff",
+    "static/media/roboto-latin-300.woff": "./static/media/roboto-latin-300.865f928cbabcc9f8f2b5.woff",
+    "static/media/roboto-latin-400.woff": "./static/media/roboto-latin-400.49ae34d4cc6b98c00c69.woff",
+    "static/media/roboto-latin-900italic.woff2": "./static/media/roboto-latin-900italic.cb5ad999740e9d8a8bd1.woff2",
+    "static/media/roboto-latin-300italic.woff2": "./static/media/roboto-latin-300italic.c64e7e354c88e613c77c.woff2",
+    "static/media/roboto-latin-400italic.woff2": "./static/media/roboto-latin-400italic.d022bc70dc1bf7b3425d.woff2",
+    "static/media/roboto-latin-500italic.woff2": "./static/media/roboto-latin-500italic.0d8bb5b3ee5f5dac9e44.woff2",
+    "static/media/roboto-latin-700italic.woff2": "./static/media/roboto-latin-700italic.7d8125ff7f707231fd89.woff2",
+    "static/media/roboto-latin-100italic.woff2": "./static/media/roboto-latin-100italic.7f839a8652da29745ce4.woff2",
+    "static/media/roboto-latin-500.woff2": "./static/media/roboto-latin-500.f5b74d7ffcdf85b9dd60.woff2",
+    "static/media/roboto-latin-700.woff2": "./static/media/roboto-latin-700.c18ee39fb002ad58b6dc.woff2",
+    "static/media/roboto-latin-100.woff2": "./static/media/roboto-latin-100.c2aa4ab115bf9c6057cb.woff2",
+    "static/media/roboto-latin-300.woff2": "./static/media/roboto-latin-300.37a7069dc30fc663c878.woff2",
+    "static/media/roboto-latin-400.woff2": "./static/media/roboto-latin-400.176f8f5bd5f02b3abfcf.woff2",
+    "static/media/roboto-latin-900.woff2": "./static/media/roboto-latin-900.870c8c1486f76054301a.woff2",
+    "static/media/logo.svg": "./static/media/logo.3704c1bbca650bb72a64b5d4c3fa5ced.svg",
+    "index.html": "./index.html",
+    "main.388a904b.css.map": "./static/css/main.388a904b.css.map",
+    "main.378317da.js.map": "./static/js/main.378317da.js.map",
+    "495.01ff0983.chunk.js.map": "./static/js/495.01ff0983.chunk.js.map",
+    "591.222f4f03.chunk.js.map": "./static/js/591.222f4f03.chunk.js.map"
+  },
+  "entrypoints": [
+    "static/css/main.388a904b.css",
+    "static/js/main.378317da.js"
+  ]
+}
\ No newline at end of file
diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/favicon.ico b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/favicon.ico
new file mode 100644
index 0000000000000000000000000000000000000000..9417b6ada1d3a2399d631c76f2fb49981adf27cf
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/favicon.ico differ
diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/index.html b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..e1af411fc45df64a64d810b66b28652aafadb2e0
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/index.html
@@ -0,0 +1 @@
+Ray Dashboard
\ No newline at end of file
diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/speedscope-1.5.3/LICENSE b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/speedscope-1.5.3/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..baf3e9d982fc948620518a9e57e8a718fcaea4ec
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/speedscope-1.5.3/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Jamie Wong
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/speedscope-1.5.3/demangle-cpp.8a387750.js b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/speedscope-1.5.3/demangle-cpp.8a387750.js
new file mode 100644
index 0000000000000000000000000000000000000000..73686d88c66534c4567a288953879bae5f36d220
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/speedscope-1.5.3/demangle-cpp.8a387750.js
@@ -0,0 +1,4 @@
+[minified demangle-cpp bundle: generated JavaScript for the C++ symbol demangler, garbled beyond recovery in this capture]
+},{}]},{},[180], null)
+//# sourceMappingURL=demangle-cpp.8a387750.map
\ No newline at end of file
diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/speedscope-1.5.3/favicon-32x32.1165a94e.png b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/speedscope-1.5.3/favicon-32x32.1165a94e.png
new file mode 100644
index 0000000000000000000000000000000000000000..d49690de3382ff513d1426620d0b7acb53135da2
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/speedscope-1.5.3/favicon-32x32.1165a94e.png differ
diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/speedscope-1.5.3/release.txt b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/speedscope-1.5.3/release.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b465301e73430b413e07fc382dc5f73be86c7769
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/speedscope-1.5.3/release.txt
@@ -0,0 +1,3 @@
+speedscope@1.5.3
+Thu Jan 16 00:10:56 PST 2020
+707462e9cffec2bda49587c39d621ba89d1b51cb
diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/speedscope-1.5.3/speedscope.75eb7d8e.js b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/speedscope-1.5.3/speedscope.75eb7d8e.js
new file mode 100644
index 0000000000000000000000000000000000000000..97c9f10f32ffdbb65e990026b55e12d18392f8a0
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/speedscope-1.5.3/speedscope.75eb7d8e.js
@@ -0,0 +1,173 @@
+[minified speedscope bundle: 173 lines of Parcel-generated JavaScript for the speedscope profiler UI, garbled beyond recovery in this capture and cut off mid-file]
e.Vec2(1,-1)).times(e.AffineTransform.betweenRects(new e.Rect(e.Vec2.zero,i),e.Rect.NDC)).transformRect(t);return e.AffineTransform.betweenRects(e.Rect.NDC,s)})()),this.gl.setUnpremultipliedBlendState(),this.gl.draw(t.Graphics.Primitive.TRIANGLE_STRIP,this.material,this.buffer)}}exports.TextureRenderer=s; +},{"../lib/math":102,"./graphics":42,"./utils":119}],121:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.ViewportRectangleRenderer=void 0;var e=require("./graphics"),i=require("./utils");const r=new e.Graphics.VertexFormat;r.add("position",e.Graphics.AttributeType.FLOAT,2);const o="\n attribute vec2 position;\n\n void main() {\n gl_Position = vec4(position, 0, 1);\n }\n",n="\n precision mediump float;\n\n uniform mat3 configSpaceToPhysicalViewSpace;\n uniform vec2 physicalSize;\n uniform vec2 physicalOrigin;\n uniform vec2 configSpaceViewportOrigin;\n uniform vec2 configSpaceViewportSize;\n uniform float framebufferHeight;\n\n void main() {\n vec2 origin = (configSpaceToPhysicalViewSpace * vec3(configSpaceViewportOrigin, 1.0)).xy;\n vec2 size = (configSpaceToPhysicalViewSpace * vec3(configSpaceViewportSize, 0.0)).xy;\n\n vec2 halfSize = physicalSize / 2.0;\n\n float borderWidth = 2.0;\n\n origin = floor(origin * halfSize) / halfSize + borderWidth * vec2(1.0, 1.0);\n size = floor(size * halfSize) / halfSize - 2.0 * borderWidth * vec2(1.0, 1.0);\n\n vec2 coord = gl_FragCoord.xy;\n coord.x = coord.x - physicalOrigin.x;\n coord.y = framebufferHeight - coord.y - physicalOrigin.y;\n vec2 clamped = clamp(coord, origin, origin + size);\n vec2 gap = clamped - coord;\n float maxdist = max(abs(gap.x), abs(gap.y));\n\n // TODO(jlfwong): Could probably optimize this to use mix somehow.\n if (maxdist == 0.0) {\n // Inside viewport rectangle\n gl_FragColor = vec4(0, 0, 0, 0);\n } else if (maxdist < borderWidth) {\n // Inside viewport rectangle at border\n gl_FragColor = vec4(0.7, 0.7, 0.7, 0.8);\n } else {\n // Outside viewport rectangle\n gl_FragColor = vec4(0.7, 0.7, 0.7, 0.5);\n }\n }\n";class t{constructor(e){this.gl=e;const i=[[-1,1],[1,1],[-1,-1],[1,-1]],t=[];for(let e of i)t.push(e[0]),t.push(e[1]);this.buffer=e.createVertexBuffer(r.stride*i.length),this.buffer.upload(new Uint8Array(new Float32Array(t).buffer)),this.material=e.createMaterial(r,o,n)}render(r){(0,i.setUniformAffineTransform)(this.material,"configSpaceToPhysicalViewSpace",r.configSpaceToPhysicalViewSpace),(0,i.setUniformVec2)(this.material,"configSpaceViewportOrigin",r.configSpaceViewportRect.origin),(0,i.setUniformVec2)(this.material,"configSpaceViewportSize",r.configSpaceViewportRect.size);const o=this.gl.viewport;this.material.setUniformVec2("physicalOrigin",o.x,o.y),this.material.setUniformVec2("physicalSize",o.width,o.height),this.material.setUniformFloat("framebufferHeight",this.gl.renderTargetHeightInPixels),this.gl.setBlendState(e.Graphics.BlendOperation.SOURCE_ALPHA,e.Graphics.BlendOperation.INVERSE_SOURCE_ALPHA),this.gl.draw(e.Graphics.Primitive.TRIANGLE_STRIP,this.material,this.buffer)}}exports.ViewportRectangleRenderer=t; +},{"./graphics":42,"./utils":119}],122:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.FlamechartColorPassRenderer=void 0;var e=require("../lib/math"),t=require("./graphics"),n=require("./utils");const r=new t.Graphics.VertexFormat;r.add("position",t.Graphics.AttributeType.FLOAT,2),r.add("uv",t.Graphics.AttributeType.FLOAT,2);const i="\n uniform mat3 uvTransform;\n uniform 
mat3 positionTransform;\n\n attribute vec2 position;\n attribute vec2 uv;\n varying vec2 vUv;\n\n void main() {\n vUv = (uvTransform * vec3(uv, 1)).xy;\n gl_Position = vec4((positionTransform * vec3(position, 1)).xy, 0, 1);\n }\n",o="\n precision mediump float;\n\n uniform vec2 uvSpacePixelSize;\n uniform float renderOutlines;\n\n varying vec2 vUv;\n uniform sampler2D colorTexture;\n\n // https://en.wikipedia.org/wiki/HSL_and_HSV#From_luma/chroma/hue\n vec3 hcl2rgb(float H, float C, float L) {\n float hPrime = H / 60.0;\n float X = C * (1.0 - abs(mod(hPrime, 2.0) - 1.0));\n vec3 RGB =\n hPrime < 1.0 ? vec3(C, X, 0) :\n hPrime < 2.0 ? vec3(X, C, 0) :\n hPrime < 3.0 ? vec3(0, C, X) :\n hPrime < 4.0 ? vec3(0, X, C) :\n hPrime < 5.0 ? vec3(X, 0, C) :\n vec3(C, 0, X);\n\n float m = L - dot(RGB, vec3(0.30, 0.59, 0.11));\n return RGB + vec3(m, m, m);\n }\n\n float triangle(float x) {\n return 2.0 * abs(fract(x) - 0.5) - 1.0;\n }\n\n vec3 colorForBucket(float t) {\n float x = triangle(30.0 * t);\n float H = 360.0 * (0.9 * t);\n float C = 0.25 + 0.2 * x;\n float L = 0.80 - 0.15 * x;\n return hcl2rgb(H, C, L);\n }\n\n void main() {\n vec4 here = texture2D(colorTexture, vUv);\n\n if (here.z == 0.0) {\n // Background color\n gl_FragColor = vec4(0, 0, 0, 0);\n return;\n }\n\n // Sample the 4 surrounding pixels in the depth texture to determine\n // if we should draw a boundary here or not.\n vec4 N = texture2D(colorTexture, vUv + vec2(0, uvSpacePixelSize.y));\n vec4 E = texture2D(colorTexture, vUv + vec2(uvSpacePixelSize.x, 0));\n vec4 S = texture2D(colorTexture, vUv + vec2(0, -uvSpacePixelSize.y));\n vec4 W = texture2D(colorTexture, vUv + vec2(-uvSpacePixelSize.x, 0));\n\n // NOTE: For outline checks, we intentionally check both the right\n // and the left to determine if we're an edge. If a rectangle is a single\n // pixel wide, we don't want to render it as an outline, so this method\n // of checking ensures that we don't outline single physical-space\n // pixel width rectangles.\n if (\n renderOutlines > 0.0 &&\n (\n here.y == N.y && here.y != S.y || // Top edge\n here.y == S.y && here.y != N.y || // Bottom edge\n here.x == E.x && here.x != W.x || // Left edge\n here.x == W.x && here.x != E.x\n )\n ) {\n // We're on an edge! Draw transparent.\n gl_FragColor = vec4(0, 0, 0, 0);\n } else {\n // Not on an edge. 
Draw the appropriate color.\n gl_FragColor = vec4(colorForBucket(here.z), here.a);\n }\n }\n";class a{constructor(e){this.gl=e;const t=[{pos:[-1,1],uv:[0,1]},{pos:[1,1],uv:[1,1]},{pos:[-1,-1],uv:[0,0]},{pos:[1,-1],uv:[1,0]}],n=[];for(let e of t)n.push(e.pos[0]),n.push(e.pos[1]),n.push(e.uv[0]),n.push(e.uv[1]);this.buffer=e.createVertexBuffer(r.stride*t.length),this.buffer.uploadFloats(n),this.material=e.createMaterial(r,i,o)}render(r){const{srcRect:i,rectInfoTexture:o}=r,a=e.AffineTransform.withTranslation(new e.Vec2(0,1)).times(e.AffineTransform.withScale(new e.Vec2(1,-1))).times(e.AffineTransform.betweenRects(new e.Rect(e.Vec2.zero,new e.Vec2(o.width,o.height)),e.Rect.unit)).transformRect(i),s=e.AffineTransform.betweenRects(e.Rect.unit,a),{dstRect:c}=r,l=new e.Vec2(this.gl.viewport.width,this.gl.viewport.height),u=e.AffineTransform.withScale(new e.Vec2(1,-1)).times(e.AffineTransform.betweenRects(new e.Rect(e.Vec2.zero,l),e.Rect.NDC)).transformRect(c),f=e.AffineTransform.betweenRects(e.Rect.NDC,u),h=e.Vec2.unit.dividedByPointwise(new e.Vec2(r.rectInfoTexture.width,r.rectInfoTexture.height));this.material.setUniformSampler("colorTexture",r.rectInfoTexture,0),(0,n.setUniformAffineTransform)(this.material,"uvTransform",s),this.material.setUniformFloat("renderOutlines",r.renderOutlines?1:0),this.material.setUniformVec2("uvSpacePixelSize",h.x,h.y),(0,n.setUniformAffineTransform)(this.material,"positionTransform",f),this.gl.setUnpremultipliedBlendState(),this.gl.draw(t.Graphics.Primitive.TRIANGLE_STRIP,this.material,this.buffer)}}exports.FlamechartColorPassRenderer=a; +},{"../lib/math":102,"./graphics":42,"./utils":119}],74:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.CanvasContext=void 0;var e=require("./graphics"),r=require("./rectangle-batch-renderer"),t=require("./texture-renderer"),i=require("../lib/math"),n=require("./overlay-rectangle-renderer"),s=require("./flamechart-color-pass-renderer");class o{constructor(i){this.animationFrameRequest=null,this.beforeFrameHandlers=new Set,this.onBeforeFrame=(()=>{this.animationFrameRequest=null,this.gl.setViewport(0,0,this.gl.renderTargetWidthInPixels,this.gl.renderTargetHeightInPixels),this.gl.clear(new e.Graphics.Color(1,1,1,1));for(const e of this.beforeFrameHandlers)e()}),this.gl=new e.WebGL.Context(i),this.rectangleBatchRenderer=new r.RectangleBatchRenderer(this.gl),this.textureRenderer=new t.TextureRenderer(this.gl),this.viewportRectangleRenderer=new n.ViewportRectangleRenderer(this.gl),this.flamechartColorPassRenderer=new s.FlamechartColorPassRenderer(this.gl);const o=this.gl.getWebGLInfo();o&&console.log(`WebGL initialized. 
renderer: ${o.renderer}, vendor: ${o.vendor}, version: ${o.version}`),window.testContextLoss=(()=>{this.gl.testContextLoss()})}addBeforeFrameHandler(e){this.beforeFrameHandlers.add(e)}removeBeforeFrameHandler(e){this.beforeFrameHandlers.delete(e)}requestFrame(){this.animationFrameRequest||(this.animationFrameRequest=requestAnimationFrame(this.onBeforeFrame))}setViewport(e,r){const{origin:t,size:i}=e;let n=this.gl.viewport;this.gl.setViewport(t.x,t.y,i.x,i.y),r();let{x:s,y:o,width:a,height:l}=n;this.gl.setViewport(s,o,a,l)}renderBehind(e,r){const t=e.getBoundingClientRect(),n=new i.Rect(new i.Vec2(t.left*window.devicePixelRatio,t.top*window.devicePixelRatio),new i.Vec2(t.width*window.devicePixelRatio,t.height*window.devicePixelRatio));this.setViewport(n,r)}}exports.CanvasContext=o; +},{"./graphics":42,"./rectangle-batch-renderer":118,"./texture-renderer":120,"../lib/math":102,"./overlay-rectangle-renderer":121,"./flamechart-color-pass-renderer":122}],38:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.getFrameToColorBucket=exports.getProfileToView=exports.getProfileWithRecursionFlattened=exports.getRowAtlas=exports.getCanvasContext=exports.createGetCSSColorForFrame=exports.createGetColorBucketForFrame=void 0;var e=require("../lib/utils"),t=require("../gl/row-atlas"),r=require("../gl/canvas-context"),o=require("../lib/color");const n=exports.createGetColorBucketForFrame=(0,e.memoizeByReference)(e=>t=>e.get(t.key)||0),a=exports.createGetCSSColorForFrame=(0,e.memoizeByReference)(t=>{const r=n(t);return t=>{const n=r(t)/255,a=(0,e.triangle)(30*n),l=.9*n*360,i=.25+.2*a,s=.8-.15*a;return o.Color.fromLumaChromaHue(s,i,l).toCSS()}}),l=exports.getCanvasContext=(0,e.memoizeByReference)(e=>new r.CanvasContext(e)),i=exports.getRowAtlas=(0,e.memoizeByReference)(e=>new t.RowAtlas(e.gl,e.rectangleBatchRenderer,e.textureRenderer)),s=exports.getProfileWithRecursionFlattened=(0,e.memoizeByReference)(e=>e.getProfileWithRecursionFlattened()),c=exports.getProfileToView=(0,e.memoizeByShallowEquality)(({profile:e,flattenRecursion:t})=>t?e.getProfileWithRecursionFlattened():e),u=exports.getFrameToColorBucket=(0,e.memoizeByReference)(e=>{const t=[];function r(e){return(e.file||"")+e.name}e.forEachFrame(e=>t.push(e)),t.sort(function(e,t){return r(e)>r(t)?1:-1});const o=new Map;for(let e=0;e{t.preventDefault();const{sortMethod:o}=this.props;if(o.field==e)this.props.setSortMethod({field:e,direction:o.direction===h.ASCENDING?h.DESCENDING:h.ASCENDING});else switch(e){case n.SYMBOL_NAME:this.props.setSortMethod({field:e,direction:h.ASCENDING});break;case n.SELF:case n.TOTAL:this.props.setSortMethod({field:e,direction:h.DESCENDING})}}),this.getFrameList=(()=>{const{profile:e,sortMethod:t}=this.props,r=[];switch(e.forEachFrame(e=>r.push(e)),t.field){case n.SYMBOL_NAME:(0,o.sortBy)(r,e=>e.name.toLowerCase());break;case n.SELF:(0,o.sortBy)(r,e=>e.getSelfWeight());break;case n.TOTAL:(0,o.sortBy)(r,e=>e.getTotalWeight())}return t.direction===h.DESCENDING&&r.reverse(),r}),this.listView=null,this.listViewRef=(e=>{if(e===this.listView)return;this.listView=e;const{selectedFrame:t}=this.props;if(!t||!e)return;const 
o=this.getFrameList().indexOf(t);-1!==o&&e.scrollIndexIntoView(o)})}renderRow(r,s){const{profile:l,selectedFrame:a}=this.props,c=r.getTotalWeight(),n=r.getSelfWeight(),h=100*c/l.getTotalNonIdleWeight(),p=100*n/l.getTotalNonIdleWeight(),S=r===a;return(0,e.h)("tr",{key:`${s}`,onClick:this.props.setSelectedFrame.bind(null,r),className:(0,t.css)(E.tableRow,s%2==0&&E.tableRowEven,S&&E.tableRowSelected)},(0,e.h)("td",{className:(0,t.css)(E.numericCell)},l.formatValue(c)," (",(0,o.formatPercent)(h),")",(0,e.h)(d,{perc:h})),(0,e.h)("td",{className:(0,t.css)(E.numericCell)},l.formatValue(n)," (",(0,o.formatPercent)(p),")",(0,e.h)(d,{perc:p})),(0,e.h)("td",{title:r.file,className:(0,t.css)(E.textCell)},(0,e.h)(i.ColorChit,{color:this.props.getCSSColorForFrame(r)}),r.name))}render(){const{sortMethod:o}=this.props,i=this.getFrameList(),l=i.map(e=>({size:r.Sizes.FRAME_HEIGHT}));return(0,e.h)("div",{className:(0,t.css)(r.commonStyle.vbox,E.profileTableView)},(0,e.h)("table",{className:(0,t.css)(E.tableView)},(0,e.h)("thead",{className:(0,t.css)(E.tableHeader)},(0,e.h)("tr",null,(0,e.h)("th",{className:(0,t.css)(E.numericCell),onClick:e=>this.onSortClick(n.TOTAL,e)},(0,e.h)(p,{activeDirection:o.field===n.TOTAL?o.direction:null}),"Total"),(0,e.h)("th",{className:(0,t.css)(E.numericCell),onClick:e=>this.onSortClick(n.SELF,e)},(0,e.h)(p,{activeDirection:o.field===n.SELF?o.direction:null}),"Self"),(0,e.h)("th",{className:(0,t.css)(E.textCell),onClick:e=>this.onSortClick(n.SYMBOL_NAME,e)},(0,e.h)(p,{activeDirection:o.field===n.SYMBOL_NAME?o.direction:null}),"Symbol Name")))),(0,e.h)(s.ScrollableListView,{ref:this.listViewRef,axis:"y",items:l,className:(0,t.css)(E.scrollView),renderItems:(o,r)=>{const s=[];for(let e=o;e<=r;e++)s.push(this.renderRow(i[e],e));return(0,e.h)("table",{className:(0,t.css)(E.tableView)},s)}}))}}exports.ProfileTableView=S;const E=t.StyleSheet.create({profileTableView:{background:r.Colors.WHITE,height:"100%"},scrollView:{overflowY:"auto",overflowX:"hidden"},tableView:{width:"100%",fontSize:r.FontSize.LABEL,background:r.Colors.WHITE},tableHeader:{borderBottom:`2px solid ${r.Colors.LIGHT_GRAY}`,textAlign:"left",color:r.Colors.GRAY,userSelect:"none"},sortIcon:{position:"relative",top:1,marginRight:r.Sizes.FRAME_HEIGHT/4},tableRow:{height:r.Sizes.FRAME_HEIGHT},tableRowEven:{background:r.Colors.OFF_WHITE},tableRowSelected:{background:r.Colors.DARK_BLUE,color:r.Colors.WHITE},numericCell:{textOverflow:"ellipsis",overflow:"hidden",whiteSpace:"nowrap",position:"relative",textAlign:"right",paddingRight:r.Sizes.FRAME_HEIGHT,width:6*r.Sizes.FRAME_HEIGHT,minWidth:6*r.Sizes.FRAME_HEIGHT},textCell:{textOverflow:"ellipsis",overflow:"hidden",whiteSpace:"nowrap",width:"100%",maxWidth:0},hBarDisplay:{position:"absolute",background:r.Colors.TRANSPARENT_GREEN,bottom:2,height:2,width:`calc(100% - ${2*r.Sizes.FRAME_HEIGHT}px)`,right:r.Sizes.FRAME_HEIGHT},hBarDisplayFilled:{height:"100%",position:"absolute",background:r.Colors.GREEN,right:0}}),m=exports.ProfileTableViewContainer=(0,a.createContainer)(S,(e,t,o)=>{const{activeProfileState:r}=o,{profile:i,sandwichViewState:s,index:a}=r;if(!i)throw new Error("profile missing");const{tableSortMethod:n}=e,{callerCallee:h}=s,d=h?h.selectedFrame:null,p=(0,c.getFrameToColorBucket)(i),S=(0,c.createGetCSSColorForFrame)(p);return{profile:i,profileIndex:r.index,selectedFrame:d,getCSSColorForFrame:S,sortMethod:n,setSelectedFrame:e=>{t(l.actions.sandwichView.setSelectedFrame({profileIndex:a,args:e}))},setSortMethod:e=>{t(l.actions.sandwichView.setTableSortMethod(e))}}}); 
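The getters module a little earlier (exports 38) derives every frame color deterministically: frames are sorted by `(file || '') + name`, assigned an integer bucket in 0-255, and the bucket is pushed through a triangle wave into luma/chroma/hue, matching the `colorForBucket` GLSL in the color-pass shader further up. Below is a minimal standalone sketch of that mapping; the `Frame` shape and the function names are illustrative only, not the bundle's actual exports.

```ts
// Hypothetical sketch of the bucket/color scheme used by
// getFrameToColorBucket / createGetCSSColorForFrame above.
interface Frame {
  key: string | number
  name: string
  file?: string
}

function buildFrameToColorBucket(frames: Frame[]): Map<string | number, number> {
  // Sort by file-then-name so frames from the same file land in nearby
  // buckets and therefore get similar hues.
  const sorted = [...frames].sort((a, b) =>
    (a.file || '') + a.name > (b.file || '') + b.name ? 1 : -1,
  )
  const buckets = new Map<string | number, number>()
  for (let i = 0; i < sorted.length; i++) {
    buckets.set(sorted[i].key, Math.floor((255 * i) / sorted.length))
  }
  return buckets
}

// triangle(x) == 2*|fract(x) - 0.5| - 1, the same wave as the GLSL above.
const triangle = (x: number) => 2 * Math.abs(x - Math.floor(x) - 0.5) - 1

// Bucket -> hue/chroma/luma, mirroring the constants visible in the bundle:
// t = bucket/255; H = 360*(0.9t); C = 0.25 + 0.2x; L = 0.80 - 0.15x.
function hclForBucket(bucket: number): {h: number; c: number; l: number} {
  const t = bucket / 255
  const x = triangle(30 * t)
  return {h: 360 * (0.9 * t), c: 0.25 + 0.2 * x, l: 0.8 - 0.15 * x}
}
```

The high-frequency triangle wave (period 1/30 over the bucket range) makes adjacent buckets alternate between lighter/duller and darker/more-saturated variants, so neighboring frames in the chart stay visually distinguishable.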
+},{"preact":24,"aphrodite":68,"../lib/utils":70,"./style":79,"./color-chit":106,"./scrollable-list-view":108,"../store/actions":40,"../lib/typed-redux":36,"../store/getters":38}],29:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.canUseXHR=exports.ViewMode=void 0,exports.createApplicationStore=d;var e=require("./actions"),t=require("redux"),r=n(t),o=require("../lib/typed-redux"),s=require("../lib/hash-params"),i=require("./profiles-state"),a=require("../views/profile-table-view");function n(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&(t[r]=e[r]);return t.default=e,t}var c=exports.ViewMode=void 0;!function(e){e[e.CHRONO_FLAME_CHART=0]="CHRONO_FLAME_CHART",e[e.LEFT_HEAVY_FLAME_GRAPH=1]="LEFT_HEAVY_FLAME_GRAPH",e[e.SANDWICH_VIEW=2]="SANDWICH_VIEW"}(c||(exports.ViewMode=c={}));const l=window.location.protocol,u=exports.canUseXHR="http:"===l||"https:"===l;function d(t){const n=(0,s.getHashParams)(),l=u&&null!=n.profileURL,d=r.combineReducers({profileGroup:i.profileGroup,hashParams:(0,o.setter)(e.actions.setHashParams,n),flattenRecursion:(0,o.setter)(e.actions.setFlattenRecursion,!1),viewMode:(0,o.setter)(e.actions.setViewMode,c.CHRONO_FLAME_CHART),glCanvas:(0,o.setter)(e.actions.setGLCanvas,null),dragActive:(0,o.setter)(e.actions.setDragActive,!1),loading:(0,o.setter)(e.actions.setLoading,l),error:(0,o.setter)(e.actions.setError,!1),tableSortMethod:(0,o.setter)(e.actions.sandwichView.setTableSortMethod,{field:a.SortField.SELF,direction:a.SortDirection.DESCENDING})});return r.createStore(d,t)} +},{"./actions":40,"redux":31,"../lib/typed-redux":36,"../lib/hash-params":50,"./profiles-state":52,"../views/profile-table-view":55}],81:[function(require,module,exports) { +"use strict";function e(e){return e.replace(/\\([a-fA-F0-9]{2})/g,(e,n)=>{const t=parseInt(n,16);return String.fromCharCode(t)})}function n(n){const t=n.split("\n");if(!t.length)return null;if(""===t[t.length-1]&&t.pop(),!t.length)return null;const r=new Map,o=/^(\d+):(.+)$/,s=/^([\$\w]+):([\$\w-]+)$/;for(const n of t){const t=o.exec(n);if(t){r.set(`wasm-function[${t[1]}]`,e(t[2]));continue}const c=s.exec(n);if(!c)return null;r.set(c[1],e(c[2]))}return r}Object.defineProperty(exports,"__esModule",{value:!0}),exports.importEmscriptenSymbolMap=n; +},{}],142:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.Flamechart=void 0;var t=require("./utils"),e=require("./math");class r{constructor(e){this.source=e,this.layers=[],this.totalWeight=0,this.minFrameWidth=1;const r=[];this.minFrameWidth=1/0;this.totalWeight=e.getTotalWeight(),e.forEachCall((e,i)=>{const s=(0,t.lastOf)(r),h={node:e,parent:s,children:[],start:i,end:i};s&&s.children.push(h),r.push(h)},(t,e)=>{console.assert(r.length>0);const i=r.pop();if(i.end=e,i.end-i.start==0)return;const s=r.length;for(;this.layers.length<=s;)this.layers.push([]);this.layers[s].push(i),this.minFrameWidth=Math.min(this.minFrameWidth,i.end-i.start)}),isFinite(this.minFrameWidth)||(this.minFrameWidth=1)}getTotalWeight(){return this.totalWeight}getLayers(){return this.layers}getColorBucketForFrame(t){return this.source.getColorBucketForFrame(t)}getMinFrameWidth(){return this.minFrameWidth}formatValue(t){return this.source.formatValue(t)}getClampedViewportWidth(t){const r=this.getTotalWeight(),i=Math.pow(2,40),s=(0,e.clamp)(3*this.getMinFrameWidth(),r/i,r);return(0,e.clamp)(t,s,r)}}exports.Flamechart=r; 
+},{"./utils":70,"./math":102}],143:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.FlamechartRenderer=exports.FlamechartRowAtlasKey=void 0;var e=require("./rectangle-batch-renderer"),t=require("../lib/math"),r=require("../lib/color"),n=require("../lib/utils"),s=require("./graphics"),o=require("./utils");const c=1e4;class i{constructor(e,t,r){this.batch=e,this.bounds=t,this.numPrecedingRectanglesInRow=r,this.children=[]}getBatch(){return this.batch}getBounds(){return this.bounds}getRectCount(){return this.batch.getRectCount()}getChildren(){return this.children}getParity(){return this.numPrecedingRectanglesInRow%2}forEachLeafNodeWithinBounds(e,t){this.bounds.hasIntersectionWith(e)&&t(this)}}class h{constructor(e){if(this.children=e,this.rectCount=0,0===e.length)throw new Error("Empty interior node");let r=1/0,n=-1/0,s=1/0,o=-1/0;for(let t of e){this.rectCount+=t.getRectCount();const e=t.getBounds();r=Math.min(r,e.left()),n=Math.max(n,e.right()),s=Math.min(s,e.top()),o=Math.max(o,e.bottom())}this.bounds=new t.Rect(new t.Vec2(r,s),new t.Vec2(n-r,o-s))}getBounds(){return this.bounds}getRectCount(){return this.rectCount}getChildren(){return this.children}forEachLeafNodeWithinBounds(e,t){if(this.bounds.hasIntersectionWith(e))for(let r of this.children)r.forEachLeafNodeWithinBounds(e,t)}}class a{get key(){return`${this.stackDepth}_${this.index}_${this.zoomLevel}`}constructor(e){this.stackDepth=e.stackDepth,this.zoomLevel=e.zoomLevel,this.index=e.index}static getOrInsert(e,t){return e.getOrInsert(new a(t))}}exports.FlamechartRowAtlasKey=a;class l{constructor(s,o,a,l,g,d={inverted:!1}){this.gl=s,this.rowAtlas=o,this.flamechart=a,this.rectangleBatchRenderer=l,this.colorPassRenderer=g,this.options=d,this.layers=[],this.rectInfoTexture=null,this.rectInfoRenderTarget=null,this.atlasKeys=new n.KeyedSet;const f=a.getLayers().length;for(let n=0;n=c&&(s.push(new i(u,new t.Rect(new t.Vec2(l,o),new t.Vec2(g-l,1)),R)),l=1/0,g=-1/0,u=new e.RectangleBatch(this.gl));const d=new t.Rect(new t.Vec2(a.start,o),new t.Vec2(a.end-a.start,1));l=Math.min(l,d.left()),g=Math.max(g,d.right());const f=new r.Color((1+h%255)/256,(1+n%255)/256,(1+this.flamechart.getColorBucketForFrame(a.node.frame))/256);u.addRect(d,f),R++}u.getRectCount()>0&&s.push(new i(u,new t.Rect(new t.Vec2(l,o),new t.Vec2(g-l,1)),R)),this.layers.push(new h(s))}}getRectInfoTexture(e,t){if(this.rectInfoTexture){const r=this.rectInfoTexture;r.width==e&&r.height==t||r.resize(e,t)}else this.rectInfoTexture=this.gl.createTexture(s.Graphics.TextureFormat.NEAREST_CLAMP,e,t);return this.rectInfoTexture}getRectInfoRenderTarget(e,t){const r=this.getRectInfoTexture(e,t);return this.rectInfoRenderTarget&&this.rectInfoRenderTarget.texture!=r&&(this.rectInfoRenderTarget.texture.free(),this.rectInfoRenderTarget.setColor(r)),this.rectInfoRenderTarget||(this.rectInfoRenderTarget=this.gl.createRenderTarget(r)),this.rectInfoRenderTarget}free(){this.rectInfoRenderTarget&&this.rectInfoRenderTarget.free(),this.rectInfoTexture&&this.rectInfoTexture.free()}configSpaceBoundsForKey(e){const{stackDepth:r,zoomLevel:n,index:s}=e,o=this.flamechart.getTotalWeight()/Math.pow(2,n),c=this.flamechart.getLayers().length,i=this.options.inverted?c-1-r:r;return new t.Rect(new t.Vec2(o*s,i),new t.Vec2(o,1))}render(e){const{configSpaceSrcRect:r,physicalSpaceDstRect:n}=e,c=[],i=t.AffineTransform.betweenRects(r,n);if(r.isEmpty())return;let h=0;for(;;){const 
e=a.getOrInsert(this.atlasKeys,{stackDepth:0,zoomLevel:h,index:0}),t=this.configSpaceBoundsForKey(e);if(i.transformRect(t).width(){const r=this.configSpaceBoundsForKey(t);this.layers[t.stackDepth].forEachLeafNodeWithinBounds(r,t=>{this.rectangleBatchRenderer.render({batch:t.getBatch(),configSpaceSrcRect:r,physicalSpaceDstRect:e})})});const T=this.getRectInfoRenderTarget(n.width(),n.height());(0,o.renderInto)(this.gl,T,()=>{this.gl.clear(new s.Graphics.Color(0,0,0,0));const e=new t.Rect(t.Vec2.zero,new t.Vec2(this.gl.viewport.width,this.gl.viewport.height)),n=t.AffineTransform.betweenRects(r,e);for(let e of m){const t=this.configSpaceBoundsForKey(e);this.rowAtlas.renderViaAtlas(e,n.transformRect(t))}for(let e of I){const t=this.configSpaceBoundsForKey(e),r=n.transformRect(t);this.layers[e.stackDepth].forEachLeafNodeWithinBounds(t,e=>{this.rectangleBatchRenderer.render({batch:e.getBatch(),configSpaceSrcRect:t,physicalSpaceDstRect:r})})}});const x=this.getRectInfoTexture(n.width(),n.height());this.colorPassRenderer.render({rectInfoTexture:x,srcRect:new t.Rect(t.Vec2.zero,new t.Vec2(x.width,x.height)),dstRect:n,renderOutlines:e.renderOutlines})}}exports.FlamechartRenderer=l; +},{"./rectangle-batch-renderer":118,"../lib/math":102,"../lib/color":76,"../lib/utils":70,"./graphics":42,"./utils":119}],163:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.style=void 0;var e=require("aphrodite"),o=require("./style");const t=exports.style=e.StyleSheet.create({hoverCount:{color:o.Colors.GREEN},fill:{width:"100%",height:"100%",position:"absolute",left:0,top:0},minimap:{height:o.Sizes.MINIMAP_HEIGHT,borderBottom:`${o.Sizes.SEPARATOR_HEIGHT}px solid ${o.Colors.LIGHT_GRAY}`},panZoomView:{flex:1},detailView:{display:"grid",height:o.Sizes.DETAIL_VIEW_HEIGHT,overflow:"hidden",gridTemplateColumns:"120px 120px 1fr",gridTemplateRows:"repeat(4, 1fr)",borderTop:`${o.Sizes.SEPARATOR_HEIGHT}px solid ${o.Colors.LIGHT_GRAY}`,fontSize:o.FontSize.LABEL,position:"absolute",background:o.Colors.WHITE,width:"100vw",bottom:0},stackTraceViewPadding:{padding:5},stackTraceView:{height:o.Sizes.DETAIL_VIEW_HEIGHT,lineHeight:`${o.FontSize.LABEL+2}px`,overflow:"auto"},stackLine:{whiteSpace:"nowrap"},stackFileLine:{color:o.Colors.LIGHT_GRAY},statsTable:{display:"grid",gridTemplateColumns:"1fr 1fr",gridTemplateRows:`repeat(3, ${o.FontSize.LABEL+10}px)`,gridGap:"1px 1px",textAlign:"center",paddingRight:1},statsTableHeader:{gridColumn:"1 / 3"},statsTableCell:{position:"relative",display:"flex",justifyContent:"center",alignItems:"center"},thisInstanceCell:{background:o.Colors.DARK_BLUE,color:o.Colors.WHITE},allInstancesCell:{background:o.Colors.PALE_DARK_BLUE,color:o.Colors.WHITE},barDisplay:{position:"absolute",top:0,left:0,background:"rgba(0, 0, 0, 0.2)",width:"100%"}}); +},{"aphrodite":68,"./style":79}],173:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.ELLIPSIS=void 0,exports.cachedMeasureTextWidth=n,exports.trimTextMid=s;var e=require("./utils");const t=exports.ELLIPSIS="…",r=new Map;let i=-1;function n(e,t){return window.devicePixelRatio!==i&&(r.clear(),i=window.devicePixelRatio),r.has(t)||r.set(t,e.measureText(t).width),r.get(t)}function o(e,r){const i=Math.floor(r/2),n=e.substr(0,i),o=e.substr(e.length-i,i);return n+t+o}function s(t,r,i){if(n(t,r)<=i)return r;const[s]=(0,e.binarySearch)(0,r.length,e=>n(t,o(r,e)),i);return o(r,s)} +},{"./utils":70}],159:[function(require,module,exports) { +"use 
strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.FlamechartMinimapView=void 0;var e,i=require("preact"),t=require("aphrodite"),o=require("../lib/math"),s=require("./flamechart-style"),n=require("./style"),a=require("../lib/text-utils");!function(e){e[e.DRAW_NEW_VIEWPORT=0]="DRAW_NEW_VIEWPORT",e[e.TRANSLATE_VIEWPORT=1]="TRANSLATE_VIEWPORT"}(e||(e={}));class r extends i.Component{constructor(){super(...arguments),this.container=null,this.containerRef=(e=>{this.container=e||null}),this.overlayCanvas=null,this.overlayCtx=null,this.onWindowResize=(()=>{this.onBeforeFrame()}),this.onBeforeFrame=(()=>{this.maybeClearInteractionLock(),this.resizeOverlayCanvasIfNeeded(),this.renderRects(),this.renderOverlays()}),this.renderCanvas=(()=>{this.props.canvasContext.requestFrame()}),this.frameHadWheelEvent=!1,this.framesWithoutWheelEvents=0,this.interactionLock=null,this.maybeClearInteractionLock=(()=>{this.interactionLock&&(this.frameHadWheelEvent||(this.framesWithoutWheelEvents++,this.framesWithoutWheelEvents>=2&&(this.interactionLock=null,this.framesWithoutWheelEvents=0)),this.props.canvasContext.requestFrame()),this.frameHadWheelEvent=!1}),this.onWheel=(e=>{if(e.preventDefault(),this.frameHadWheelEvent=!0,(e.metaKey||e.ctrlKey)&&"pan"!==this.interactionLock){let i=1+e.deltaY/100;e.ctrlKey&&(i=1+e.deltaY/40),i=(0,o.clamp)(i,.1,10),this.zoom(i)}else"zoom"!==this.interactionLock&&this.pan(new o.Vec2(e.deltaX,e.deltaY));this.renderCanvas()}),this.dragStartConfigSpaceMouse=null,this.dragConfigSpaceViewportOffset=null,this.draggingMode=null,this.onMouseDown=(i=>{const t=this.configSpaceMouse(i);t&&(this.props.configSpaceViewportRect.contains(t)?(this.draggingMode=e.TRANSLATE_VIEWPORT,this.dragConfigSpaceViewportOffset=t.minus(this.props.configSpaceViewportRect.origin)):this.draggingMode=e.DRAW_NEW_VIEWPORT,this.dragStartConfigSpaceMouse=t,window.addEventListener("mousemove",this.onWindowMouseMove),window.addEventListener("mouseup",this.onWindowMouseUp),this.updateCursor(t))}),this.onWindowMouseMove=(i=>{if(!this.dragStartConfigSpaceMouse)return;let t=this.configSpaceMouse(i);if(t)if(this.updateCursor(t),t=new o.Rect(new o.Vec2(0,0),this.configSpaceSize()).closestPointTo(t),this.draggingMode===e.DRAW_NEW_VIEWPORT){const e=this.dragStartConfigSpaceMouse;let i=t;if(!e||!i)return;const s=Math.min(e.x,i.x),n=Math.max(e.x,i.x)-s,a=this.props.configSpaceViewportRect.height();this.props.setConfigSpaceViewportRect(new o.Rect(new o.Vec2(s,i.y-a/2),new o.Vec2(n,a)))}else if(this.draggingMode===e.TRANSLATE_VIEWPORT){if(!this.dragConfigSpaceViewportOffset)return;const e=t.minus(this.dragConfigSpaceViewportOffset);this.props.setConfigSpaceViewportRect(this.props.configSpaceViewportRect.withOrigin(e))}}),this.updateCursor=(i=>{this.draggingMode===e.TRANSLATE_VIEWPORT?(document.body.style.cursor="grabbing",document.body.style.cursor="-webkit-grabbing"):this.draggingMode===e.DRAW_NEW_VIEWPORT?document.body.style.cursor="col-resize":this.props.configSpaceViewportRect.contains(i)?(document.body.style.cursor="grab",document.body.style.cursor="-webkit-grab"):document.body.style.cursor="col-resize"}),this.onMouseLeave=(()=>{null==this.draggingMode&&(document.body.style.cursor="default")}),this.onMouseMove=(e=>{const i=this.configSpaceMouse(e);i&&this.updateCursor(i)}),this.onWindowMouseUp=(e=>{this.draggingMode=null,window.removeEventListener("mousemove",this.onWindowMouseMove),window.removeEventListener("mouseup",this.onWindowMouseUp);const 
i=this.configSpaceMouse(e);i&&this.updateCursor(i)}),this.overlayCanvasRef=(e=>{e?(this.overlayCanvas=e,this.overlayCtx=this.overlayCanvas.getContext("2d"),this.renderCanvas()):(this.overlayCanvas=null,this.overlayCtx=null)})}physicalViewSize(){return new o.Vec2(this.overlayCanvas?this.overlayCanvas.width:0,this.overlayCanvas?this.overlayCanvas.height:0)}minimapOrigin(){return new o.Vec2(0,n.Sizes.FRAME_HEIGHT*window.devicePixelRatio)}configSpaceSize(){return new o.Vec2(this.props.flamechart.getTotalWeight(),this.props.flamechart.getLayers().length)}configSpaceToPhysicalViewSpace(){const e=this.minimapOrigin();return o.AffineTransform.betweenRects(new o.Rect(new o.Vec2(0,0),this.configSpaceSize()),new o.Rect(e,this.physicalViewSize().minus(e)))}logicalToPhysicalViewSpace(){return o.AffineTransform.withScale(new o.Vec2(window.devicePixelRatio,window.devicePixelRatio))}windowToLogicalViewSpace(){if(!this.container)return new o.AffineTransform;const e=this.container.getBoundingClientRect();return o.AffineTransform.withTranslation(new o.Vec2(-e.left,-e.top))}renderRects(){this.container&&(this.physicalViewSize().x<2||this.props.canvasContext.renderBehind(this.container,()=>{this.props.flamechartRenderer.render({configSpaceSrcRect:new o.Rect(new o.Vec2(0,0),this.configSpaceSize()),physicalSpaceDstRect:new o.Rect(this.minimapOrigin(),this.physicalViewSize().minus(this.minimapOrigin())),renderOutlines:!1}),this.props.canvasContext.viewportRectangleRenderer.render({configSpaceViewportRect:this.props.configSpaceViewportRect,configSpaceToPhysicalViewSpace:this.configSpaceToPhysicalViewSpace()})}))}renderOverlays(){const e=this.overlayCtx;if(!e)return;const i=this.physicalViewSize();e.clearRect(0,0,i.x,i.y);const t=this.configSpaceToPhysicalViewSpace(),s=this.configSpaceSize().x,r=(this.configSpaceToPhysicalViewSpace().inverted()||new o.AffineTransform).times(this.logicalToPhysicalViewSpace()).transformVector(new o.Vec2(200,1)).x,c=n.Sizes.FRAME_HEIGHT*window.devicePixelRatio,h=n.FontSize.LABEL*window.devicePixelRatio,l=(c-h)/2;e.font=`${h}px/${c}px ${n.FontFamily.MONOSPACE}`,e.textBaseline="top";let p=Math.pow(10,Math.floor(Math.log10(r)));r/p>5?p*=5:r/p>2&&(p*=2),e.fillStyle="rgba(255, 255, 255, 0.8)",e.fillRect(0,0,i.x,c),e.textBaseline="top",e.fillStyle=n.Colors.DARK_GRAY;for(let n=Math.ceil(0/p)*p;n ")),i.push(c.name),c.file){let l=c.file;c.line&&(l+=`:${c.line}`,c.col&&(l+=`:${c.col}`)),i.push((0,s.h)("span",{className:(0,e.css)(t.style.stackFileLine)}," (",l,")"))}l.push((0,s.h)("div",{className:(0,e.css)(t.style.stackLine)},i))}return(0,s.h)("div",{className:(0,e.css)(t.style.stackTraceView)},(0,s.h)("div",{className:(0,e.css)(t.style.stackTraceViewPadding)},l))}}class c extends s.Component{render(){const{flamechart:l,selectedNode:a}=this.props,{frame:c}=a;return(0,s.h)("div",{className:(0,e.css)(t.style.detailView)},(0,s.h)(r,{title:"This Instance",cellStyle:t.style.thisInstanceCell,grandTotal:l.getTotalWeight(),selectedTotal:a.getTotalWeight(),selectedSelf:a.getSelfWeight(),formatter:l.formatValue.bind(l)}),(0,s.h)(r,{title:"All Instances",cellStyle:t.style.allInstancesCell,grandTotal:l.getTotalWeight(),selectedTotal:c.getTotalWeight(),selectedSelf:c.getSelfWeight(),formatter:l.formatValue.bind(l)}),(0,s.h)(i,{node:a,getFrameColor:this.props.getCSSColorForFrame}))}}exports.FlamechartDetailView=c; +},{"aphrodite":68,"preact":24,"./flamechart-style":163,"../lib/utils":70,"./color-chit":106}],161:[function(require,module,exports) { +"use 
strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.FlamechartPanZoomView=void 0;var e=require("../lib/math"),t=require("./style"),i=require("../lib/text-utils"),o=require("./flamechart-style"),s=require("preact"),n=require("aphrodite");class r extends s.Component{constructor(){super(...arguments),this.container=null,this.containerRef=(e=>{this.container=e||null}),this.overlayCanvas=null,this.overlayCtx=null,this.hoveredLabel=null,this.overlayCanvasRef=(e=>{e?(this.overlayCanvas=e,this.overlayCtx=this.overlayCanvas.getContext("2d"),this.renderCanvas()):(this.overlayCanvas=null,this.overlayCtx=null)}),this.LOGICAL_VIEW_SPACE_FRAME_HEIGHT=t.Sizes.FRAME_HEIGHT,this.onWindowResize=(()=>{this.updateConfigSpaceViewport(),this.onBeforeFrame()}),this.frameHadWheelEvent=!1,this.framesWithoutWheelEvents=0,this.interactionLock=null,this.maybeClearInteractionLock=(()=>{this.interactionLock&&(this.frameHadWheelEvent||(this.framesWithoutWheelEvents++,this.framesWithoutWheelEvents>=2&&(this.interactionLock=null,this.framesWithoutWheelEvents=0)),this.props.canvasContext.requestFrame()),this.frameHadWheelEvent=!1}),this.onBeforeFrame=(()=>{this.resizeOverlayCanvasIfNeeded(),this.renderRects(),this.renderOverlays(),this.maybeClearInteractionLock()}),this.renderCanvas=(()=>{this.props.canvasContext.requestFrame()}),this.lastDragPos=null,this.mouseDownPos=null,this.onMouseDown=(t=>{this.mouseDownPos=this.lastDragPos=new e.Vec2(t.offsetX,t.offsetY),this.updateCursor(),window.addEventListener("mouseup",this.onWindowMouseUp)}),this.onMouseDrag=(t=>{if(!this.lastDragPos)return;const i=new e.Vec2(t.offsetX,t.offsetY);this.pan(this.lastDragPos.minus(i)),this.lastDragPos=i,this.hoveredLabel&&this.props.onNodeHover(null)}),this.onDblClick=(t=>{if(this.hoveredLabel){const t=this.hoveredLabel.configSpaceBounds,i=new e.Rect(t.origin.minus(new e.Vec2(0,1)),t.size.withY(this.props.configSpaceViewportRect.height()));this.props.setConfigSpaceViewportRect(i)}}),this.onClick=(t=>{const i=new e.Vec2(t.offsetX,t.offsetY),o=this.mouseDownPos;this.mouseDownPos=null,o&&i.minus(o).length()>5||(this.hoveredLabel?(this.props.onNodeSelect(this.hoveredLabel.node),this.renderCanvas()):this.props.onNodeSelect(null))}),this.onWindowMouseUp=(e=>{this.lastDragPos=null,this.updateCursor(),window.removeEventListener("mouseup",this.onWindowMouseUp)}),this.onMouseMove=(t=>{if(this.updateCursor(),this.lastDragPos)return t.preventDefault(),void this.onMouseDrag(t);this.hoveredLabel=null;const i=new e.Vec2(t.offsetX,t.offsetY),o=this.logicalToPhysicalViewSpace().transformPosition(i),s=this.configSpaceToPhysicalViewSpace().inverseTransformPosition(o);if(!s)return;const n=(t,i=0)=>{const o=t.end-t.start,r=this.props.renderInverted?this.configSpaceSize().y-1-i:i,a=new e.Rect(new e.Vec2(t.start,r),new e.Vec2(o,1));if(s.xa.right())return null;a.contains(s)&&(this.hoveredLabel={configSpaceBounds:a,node:t.node});for(let e of t.children)n(e,i+1)};for(let e of this.props.flamechart.getLayers()[0]||[])n(e);this.hoveredLabel?this.props.onNodeHover({node:this.hoveredLabel.node,event:t}):this.props.onNodeHover(null),this.renderCanvas()}),this.onMouseLeave=(e=>{this.hoveredLabel=null,this.props.onNodeHover(null),this.renderCanvas()}),this.onWheel=(t=>{t.preventDefault(),this.frameHadWheelEvent=!0;const i=t.metaKey||t.ctrlKey;let o=t.deltaY,s=t.deltaX;if(t.deltaMode===t.DOM_DELTA_LINE&&(o*=this.LOGICAL_VIEW_SPACE_FRAME_HEIGHT,s*=this.LOGICAL_VIEW_SPACE_FRAME_HEIGHT),i&&"pan"!==this.interactionLock){let 
i=1+o/100;t.ctrlKey&&(i=1+o/40),i=(0,e.clamp)(i,.1,10),this.zoom(new e.Vec2(t.offsetX,t.offsetY),i)}else"zoom"!==this.interactionLock&&this.pan(new e.Vec2(s,o));this.renderCanvas()}),this.onWindowKeyPress=(t=>{if(!this.container)return;const{width:i,height:o}=this.container.getBoundingClientRect();"="===t.key||"+"===t.key?(this.zoom(new e.Vec2(i/2,o/2),.5),t.preventDefault()):"-"!==t.key&&"_"!==t.key||(this.zoom(new e.Vec2(i/2,o/2),2),t.preventDefault()),t.ctrlKey||t.shiftKey||t.metaKey||("0"===t.key?this.zoom(new e.Vec2(i/2,o/2),1e9):"ArrowRight"===t.key||"KeyD"===t.code?this.pan(new e.Vec2(100,0)):"ArrowLeft"===t.key||"KeyA"===t.code?this.pan(new e.Vec2(-100,0)):"ArrowUp"===t.key||"KeyW"===t.code?this.pan(new e.Vec2(0,-100)):"ArrowDown"===t.key||"KeyS"===t.code?this.pan(new e.Vec2(0,100)):"Escape"===t.key&&(this.props.onNodeSelect(null),this.renderCanvas()))})}setConfigSpaceViewportRect(e){this.props.setConfigSpaceViewportRect(e)}configSpaceSize(){return new e.Vec2(this.props.flamechart.getTotalWeight(),this.props.flamechart.getLayers().length)}physicalViewSize(){return new e.Vec2(this.overlayCanvas?this.overlayCanvas.width:0,this.overlayCanvas?this.overlayCanvas.height:0)}physicalBounds(){if(this.props.renderInverted){const t=this.physicalViewSize().y,i=(this.configSpaceSize().y+1)*this.LOGICAL_VIEW_SPACE_FRAME_HEIGHT*window.devicePixelRatio;if(i{const f=t.end-t.start,w=this.props.renderInverted?this.configSpaceSize().y-1-d:d,u=new e.Rect(new e.Vec2(t.start,w),new e.Vec2(f,1));if(!(fthis.props.configSpaceViewportRect.right()||u.right()this.props.configSpaceViewportRect.bottom())return;if(u.hasIntersectionWith(this.props.configSpaceViewportRect)){let e=s.transformRect(u);if(e.left()<0&&(e=e.withOrigin(e.origin.withX(0)).withSize(e.size.withX(e.size.x+e.left()))),e.right()>a.x&&(e=e.withSize(e.size.withX(a.x-e.left()))),e.width()>h){const s=(0,i.trimTextMid)(o,t.node.frame.name,e.width()-2*l);o.fillText(s,e.left()+l,Math.round(e.bottom()-(r-n)/2))}}for(let e of t.children)p(e,d+1)}};for(let e of this.props.flamechart.getLayers()[0]||[])p(e);const d=2*window.devicePixelRatio;o.strokeStyle=t.Colors.PALE_DARK_BLUE,o.lineWidth=d;const f=(s.inverseTransformVector(new e.Vec2(1,0))||new e.Vec2(0,0)).x,w=(i,n=0)=>{if(!this.props.selectedNode)return;const r=i.end-i.start,a=this.props.renderInverted?this.configSpaceSize().y-1-n:n,h=new e.Rect(new e.Vec2(i.start,a),new e.Vec2(r,1));if(!(r<f||h.left()>this.props.configSpaceViewportRect.right()||h.right()<this.props.configSpaceViewportRect.left()||h.top()>this.props.configSpaceViewportRect.bottom())){if(h.hasIntersectionWith(this.props.configSpaceViewportRect)){const e=s.transformRect(h);i.node.frame===this.props.selectedNode.frame&&(i.node===this.props.selectedNode?o.strokeStyle!==t.Colors.DARK_BLUE&&(o.stroke(),o.beginPath(),o.strokeStyle=t.Colors.DARK_BLUE):o.strokeStyle!==t.Colors.PALE_DARK_BLUE&&(o.stroke(),o.beginPath(),o.strokeStyle=t.Colors.PALE_DARK_BLUE),o.rect(Math.round(e.left()+1+d/2),Math.round(e.top()+1+d/2),Math.round(Math.max(0,e.width()-2-d)),Math.round(Math.max(0,e.height()-2-d))))}for(let e of i.children)w(e,n+1)}};o.beginPath();for(let e of this.props.flamechart.getLayers()[0]||[])w(e);o.stroke(),this.renderTimeIndicators()}renderTimeIndicators(){const o=this.overlayCtx;if(!o)return;const 
s=this.LOGICAL_VIEW_SPACE_FRAME_HEIGHT*window.devicePixelRatio,n=this.physicalViewSize(),r=this.configSpaceToPhysicalViewSpace(),a=(s-t.FontSize.LABEL*window.devicePixelRatio)/2,h=this.props.configSpaceViewportRect.left(),c=this.props.configSpaceViewportRect.right(),l=(this.configSpaceToPhysicalViewSpace().inverted()||new e.AffineTransform).times(this.logicalToPhysicalViewSpace()).transformVector(new e.Vec2(200,1)).x;let p=Math.pow(10,Math.floor(Math.log10(l)));l/p>5?p*=5:l/p>2&&(p*=2);{const l=this.props.renderInverted?n.y-s:0;o.fillStyle="rgba(255, 255, 255, 0.8)",o.fillRect(0,l,n.x,s),o.fillStyle=t.Colors.DARK_GRAY,o.textBaseline="top";for(let t=Math.ceil(h/p)*p;t{this.props.flamechartRenderer.render({physicalSpaceDstRect:this.physicalBounds(),configSpaceSrcRect:this.props.configSpaceViewportRect,renderOutlines:!0})}))}pan(t){this.interactionLock="pan";const i=this.logicalToPhysicalViewSpace().transformVector(t),o=this.configSpaceToPhysicalViewSpace().inverseTransformVector(i);this.hoveredLabel&&this.props.onNodeHover(null),o&&this.props.transformViewport(e.AffineTransform.withTranslation(o))}zoom(t,i){this.interactionLock="zoom";const o=this.logicalToPhysicalViewSpace().transformPosition(t),s=this.configSpaceToPhysicalViewSpace().inverseTransformPosition(o);if(!s)return;const n=e.AffineTransform.withTranslation(s.times(-1)).scaledBy(new e.Vec2(i,1)).translatedBy(s);this.props.transformViewport(n)}updateCursor(){this.lastDragPos?(document.body.style.cursor="grabbing",document.body.style.cursor="-webkit-grabbing"):document.body.style.cursor="default"}shouldComponentUpdate(){return!1}componentWillReceiveProps(e){this.props.flamechart!==e.flamechart?(this.hoveredLabel=null,this.renderCanvas()):this.props.selectedNode!==e.selectedNode?this.renderCanvas():this.props.configSpaceViewportRect!==e.configSpaceViewportRect&&this.renderCanvas()}componentDidMount(){this.props.canvasContext.addBeforeFrameHandler(this.onBeforeFrame),window.addEventListener("resize",this.onWindowResize),window.addEventListener("keydown",this.onWindowKeyPress)}componentWillUnmount(){this.props.canvasContext.removeBeforeFrameHandler(this.onBeforeFrame),window.removeEventListener("resize",this.onWindowResize),window.removeEventListener("keydown",this.onWindowKeyPress)}render(){return(0,s.h)("div",{className:(0,n.css)(o.style.panZoomView,t.commonStyle.vbox),onMouseDown:this.onMouseDown,onMouseMove:this.onMouseMove,onMouseLeave:this.onMouseLeave,onClick:this.onClick,onDblClick:this.onDblClick,onWheel:this.onWheel,ref:this.containerRef},(0,s.h)("canvas",{width:1,height:1,ref:this.overlayCanvasRef,className:(0,n.css)(o.style.fill)}))}}exports.FlamechartPanZoomView=r; +},{"../lib/math":102,"./style":79,"../lib/text-utils":173,"./flamechart-style":163,"preact":24,"aphrodite":68}],162:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.Hovertip=void 0;var e=require("./style"),o=require("aphrodite"),t=require("preact");class i extends t.Component{render(){const{containerSize:i,offset:r}=this.props,n=i.x,p=i.y,d={};return r.x+7+e.Sizes.TOOLTIP_WIDTH_MAX{const t=a.Sizes.DETAIL_VIEW_HEIGHT/a.Sizes.FRAME_HEIGHT,i=this.configSpaceSize(),o=this.props.flamechart.getClampedViewportWidth(e.size.x),s=e.size.withX(o),c=r.Vec2.clamp(e.origin,new r.Vec2(0,-1),r.Vec2.max(r.Vec2.zero,i.minus(s).plus(new r.Vec2(0,t+1))));this.props.setConfigSpaceViewportRect(new 
r.Rect(c,e.size.withX(o)))}),this.setLogicalSpaceViewportSize=(e=>{this.props.setLogicalSpaceViewportSize(e)}),this.transformViewport=(e=>{const t=e.transformRect(this.props.configSpaceViewportRect);this.setConfigSpaceViewportRect(t)}),this.onNodeHover=(e=>{this.props.setNodeHover(e)}),this.onNodeClick=(e=>{this.props.setSelectedNode(e)}),this.container=null,this.containerRef=(e=>{this.container=e||null})}configSpaceSize(){return new r.Vec2(this.props.flamechart.getTotalWeight(),this.props.flamechart.getLayers().length)}formatValue(e){const t=100*e/this.props.flamechart.getTotalWeight(),r=(0,i.formatPercent)(t);return`${this.props.flamechart.formatValue(e)} (${r})`}renderTooltip(){if(!this.container)return null;const{hover:i}=this.props;if(!i)return null;const{width:o,height:a,left:c,top:p}=this.container.getBoundingClientRect(),h=new r.Vec2(i.event.clientX-c,i.event.clientY-p);return(0,e.h)(n.Hovertip,{containerSize:new r.Vec2(o,a),offset:h},(0,e.h)("span",{className:(0,t.css)(s.style.hoverCount)},this.formatValue(i.node.getTotalWeight()))," ",i.node.frame.name)}render(){return(0,e.h)("div",{className:(0,t.css)(s.style.fill,a.commonStyle.vbox),ref:this.containerRef},(0,e.h)(o.FlamechartMinimapView,{configSpaceViewportRect:this.props.configSpaceViewportRect,transformViewport:this.transformViewport,flamechart:this.props.flamechart,flamechartRenderer:this.props.flamechartRenderer,canvasContext:this.props.canvasContext,setConfigSpaceViewportRect:this.setConfigSpaceViewportRect}),(0,e.h)(p.FlamechartPanZoomView,{canvasContext:this.props.canvasContext,flamechart:this.props.flamechart,flamechartRenderer:this.props.flamechartRenderer,renderInverted:!1,onNodeHover:this.onNodeHover,onNodeSelect:this.onNodeClick,selectedNode:this.props.selectedNode,transformViewport:this.transformViewport,configSpaceViewportRect:this.props.configSpaceViewportRect,setConfigSpaceViewportRect:this.setConfigSpaceViewportRect,logicalSpaceViewportSize:this.props.logicalSpaceViewportSize,setLogicalSpaceViewportBounds:this.setLogicalSpaceViewportSize}),this.renderTooltip(),this.props.selectedNode&&(0,e.h)(c.FlamechartDetailView,{flamechart:this.props.flamechart,getCSSColorForFrame:this.props.getCSSColorForFrame,selectedNode:this.props.selectedNode}))}}exports.FlamechartView=l; +},{"preact":24,"aphrodite":68,"../lib/math":102,"../lib/utils":70,"./flamechart-minimap-view":159,"./flamechart-style":163,"./style":79,"./flamechart-detail-view":160,"./flamechart-pan-zoom-view":161,"./hovertip":162,"../lib/typed-redux":36}],62:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.LeftHeavyFlamechartView=exports.getLeftHeavyFlamechart=exports.ChronoFlamechartView=exports.createMemoizedFlamechartRenderer=exports.getChronoViewFlamechart=void 0,exports.createFlamechartSetters=n;var e=require("../store/flamechart-view-state"),t=require("../lib/flamechart"),r=require("../gl/flamechart-renderer"),a=require("../lib/typed-redux"),o=require("../lib/utils"),l=require("./flamechart-view"),c=require("../store/getters"),i=require("../store/actions");function n(e,t,r){function a(a,o){return l=>{const 
c=Object.assign({},o(l),{id:t});e(a({profileIndex:r,args:c}))}}const{setHoveredNode:o,setLogicalSpaceViewportSize:l,setConfigSpaceViewportRect:c,setSelectedNode:n}=i.actions.flamechart;return{setNodeHover:a(o,e=>({hover:e})),setLogicalSpaceViewportSize:a(l,e=>({logicalSpaceViewportSize:e})),setConfigSpaceViewportRect:a(c,e=>({configSpaceViewportRect:e})),setSelectedNode:a(n,e=>({selectedNode:e}))}}const m=exports.getChronoViewFlamechart=(0,o.memoizeByShallowEquality)(({profile:e,getColorBucketForFrame:r})=>new t.Flamechart({getTotalWeight:e.getTotalWeight.bind(e),forEachCall:e.forEachCall.bind(e),formatValue:e.formatValue.bind(e),getColorBucketForFrame:r})),s=exports.createMemoizedFlamechartRenderer=(e=>(0,o.memoizeByShallowEquality)(({canvasContext:t,flamechart:a})=>new r.FlamechartRenderer(t.gl,(0,c.getRowAtlas)(t),a,t.rectangleBatchRenderer,t.flamechartColorPassRenderer,e))),h=s(),F=exports.ChronoFlamechartView=(0,a.createContainer)(l.FlamechartView,(t,r,a)=>{const{activeProfileState:o,glCanvas:l}=a,{index:i,profile:s,chronoViewState:F}=o,C=(0,c.getCanvasContext)(l),f=(0,c.getFrameToColorBucket)(s),g=(0,c.createGetColorBucketForFrame)(f),d=(0,c.createGetCSSColorForFrame)(f),u=m({profile:s,getColorBucketForFrame:g}),p=h({canvasContext:C,flamechart:u});return Object.assign({renderInverted:!1,flamechart:u,flamechartRenderer:p,canvasContext:C,getCSSColorForFrame:d},n(r,e.FlamechartID.CHRONO,i),F)}),C=exports.getLeftHeavyFlamechart=(0,o.memoizeByShallowEquality)(({profile:e,getColorBucketForFrame:r})=>new t.Flamechart({getTotalWeight:e.getTotalNonIdleWeight.bind(e),forEachCall:e.forEachCallGrouped.bind(e),formatValue:e.formatValue.bind(e),getColorBucketForFrame:r})),f=s(),g=exports.LeftHeavyFlamechartView=(0,a.createContainer)(l.FlamechartView,(t,r,a)=>{const{activeProfileState:o,glCanvas:l}=a,{index:i,profile:m,leftHeavyViewState:s}=o,h=(0,c.getCanvasContext)(l),F=(0,c.getFrameToColorBucket)(m),g=(0,c.createGetColorBucketForFrame)(F),d=(0,c.createGetCSSColorForFrame)(F),u=C({profile:m,getColorBucketForFrame:g}),p=f({canvasContext:h,flamechart:u});return Object.assign({renderInverted:!1,flamechart:u,flamechartRenderer:p,canvasContext:h,getCSSColorForFrame:d},n(r,e.FlamechartID.LEFT_HEAVY,i),s)}); +},{"../store/flamechart-view-state":98,"../lib/flamechart":142,"../gl/flamechart-renderer":143,"../lib/typed-redux":36,"../lib/utils":70,"./flamechart-view":115,"../store/getters":38,"../store/actions":40}],167:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.style=exports.FlamechartWrapper=void 0;var e=require("aphrodite"),t=require("preact"),r=require("./style"),o=require("../lib/math"),i=require("./flamechart-pan-zoom-view"),s=require("../lib/utils"),a=require("./hovertip"),n=require("../lib/typed-redux");class p extends n.StatelessComponent{constructor(){super(...arguments),this.setConfigSpaceViewportRect=(e=>{this.props.setConfigSpaceViewportRect(this.clampViewportToFlamegraph(e))}),this.setLogicalSpaceViewportSize=(e=>{this.props.setLogicalSpaceViewportSize(e)}),this.transformViewport=(e=>{this.setConfigSpaceViewportRect(e.transformRect(this.props.configSpaceViewportRect))}),this.container=null,this.containerRef=(e=>{this.container=e||null}),this.setNodeHover=(e=>{this.props.setNodeHover(e)})}clampViewportToFlamegraph(e){const{flamechart:t,renderInverted:r}=this.props,i=new o.Vec2(t.getTotalWeight(),t.getLayers().length),s=this.props.flamechart.getClampedViewportWidth(e.size.x),a=e.size.withX(s),n=o.Vec2.clamp(e.origin,new 
o.Vec2(0,r?0:-1),o.Vec2.max(o.Vec2.zero,i.minus(a).plus(new o.Vec2(0,1))));return new o.Rect(n,e.size.withX(s))}formatValue(e){const t=100*e/this.props.flamechart.getTotalWeight(),r=(0,s.formatPercent)(t);return`${this.props.flamechart.formatValue(e)} (${r})`}renderTooltip(){if(!this.container)return null;const{hover:r}=this.props;if(!r)return null;const{width:i,height:s,left:n,top:p}=this.container.getBoundingClientRect(),l=new o.Vec2(r.event.clientX-n,r.event.clientY-p);return(0,t.h)(a.Hovertip,{containerSize:new o.Vec2(i,s),offset:l},(0,t.h)("span",{className:(0,e.css)(c.hoverCount)},this.formatValue(r.node.getTotalWeight()))," ",r.node.frame.name)}render(){return(0,t.h)("div",{className:(0,e.css)(r.commonStyle.fillY,r.commonStyle.fillX,r.commonStyle.vbox),ref:this.containerRef},(0,t.h)(i.FlamechartPanZoomView,{selectedNode:null,onNodeHover:this.setNodeHover,onNodeSelect:s.noop,configSpaceViewportRect:this.props.configSpaceViewportRect,setConfigSpaceViewportRect:this.setConfigSpaceViewportRect,transformViewport:this.transformViewport,flamechart:this.props.flamechart,flamechartRenderer:this.props.flamechartRenderer,canvasContext:this.props.canvasContext,renderInverted:this.props.renderInverted,logicalSpaceViewportSize:this.props.logicalSpaceViewportSize,setLogicalSpaceViewportBounds:this.setLogicalSpaceViewportSize}),this.renderTooltip())}}exports.FlamechartWrapper=p;const c=exports.style=e.StyleSheet.create({hoverCount:{color:r.Colors.GREEN}}); +},{"aphrodite":68,"preact":24,"./style":79,"../lib/math":102,"./flamechart-pan-zoom-view":161,"../lib/utils":70,"./hovertip":162,"../lib/typed-redux":36}],137:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.InvertedCallerFlamegraphView=void 0;var e=require("../lib/utils"),r=require("../lib/flamechart"),t=require("./flamechart-view-container"),a=require("../lib/typed-redux"),l=require("../store/getters"),o=require("../store/flamechart-view-state"),i=require("./flamechart-wrapper");const n=(0,e.memoizeByShallowEquality)(({profile:e,frame:r,flattenRecursion:t})=>{let a=e.getInvertedProfileForCallersOf(r);return t?a.getProfileWithRecursionFlattened():a}),c=(0,e.memoizeByShallowEquality)(({invertedCallerProfile:e,getColorBucketForFrame:t})=>new r.Flamechart({getTotalWeight:e.getTotalNonIdleWeight.bind(e),forEachCall:e.forEachCallGrouped.bind(e),formatValue:e.formatValue.bind(e),getColorBucketForFrame:t})),s=(0,t.createMemoizedFlamechartRenderer)({inverted:!0}),m=exports.InvertedCallerFlamegraphView=(0,a.createContainer)(i.FlamechartWrapper,(e,r,a)=>{const{activeProfileState:i}=a;let{profile:m,sandwichViewState:f,index:d}=i,{flattenRecursion:u,glCanvas:C}=e;if(!m)throw new Error("profile missing");if(!C)throw new Error("glCanvas missing");const{callerCallee:h}=f;if(!h)throw new Error("callerCallee missing");const{selectedFrame:F}=h;m=u?(0,l.getProfileWithRecursionFlattened)(m):m;const g=(0,l.getFrameToColorBucket)(m),v=(0,l.createGetColorBucketForFrame)(g),p=(0,l.createGetCSSColorForFrame)(g),w=(0,l.getCanvasContext)(C),S=c({invertedCallerProfile:n({profile:m,frame:F,flattenRecursion:u}),getColorBucketForFrame:v}),E=s({canvasContext:w,flamechart:S});return Object.assign({renderInverted:!0,flamechart:S,flamechartRenderer:E,canvasContext:w,getCSSColorForFrame:p},(0,t.createFlamechartSetters)(r,o.FlamechartID.SANDWICH_INVERTED_CALLERS,d),{setSelectedNode:()=>{}},h.invertedCallerFlamegraph)}); 
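The container modules here (62, 137, and 138 just below) wire everything together through `memoizeByReference` and `memoizeByShallowEquality` from lib/utils, so a new `Flamechart` or `FlamechartRenderer` is only constructed when its inputs actually change. The originals are minified away in this bundle; the sketch below is a hypothetical reconstruction consistent with how they are called above (a single argument, with the result cached by identity or by shallow comparison of an options object).

```ts
// Hypothetical reconstruction (assumption: not the bundle's exact code) of
// the two memoizers from lib/utils, matching their call sites above.

// Cache the last result, keyed by argument identity.
function memoizeByReference<T, R>(cb: (arg: T) => R): (arg: T) => R {
  let last: {arg: T; result: R} | null = null
  return (arg: T) => {
    if (last && last.arg === arg) return last.result
    const result = cb(arg)
    last = {arg, result}
    return result
  }
}

// Cache the last result, keyed by shallow comparison of an options object.
function memoizeByShallowEquality<T extends Record<string, unknown>, R>(
  cb: (args: T) => R,
): (args: T) => R {
  let last: {args: T; result: R} | null = null
  const shallowEqual = (a: T, b: T) => {
    const keys = Object.keys(a)
    return keys.length === Object.keys(b).length && keys.every(k => a[k] === b[k])
  }
  return (args: T) => {
    if (last && shallowEqual(last.args, args)) return last.result
    const result = cb(args)
    last = {args, result}
    return result
  }
}
```

A single-entry cache appears to be the intent here: a call like `getChronoViewFlamechart({profile, getColorBucketForFrame})` recomputes only when one of those references changes, which lets these containers run on every store update without re-tessellating the flamechart.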
+},{"../lib/utils":70,"../lib/flamechart":142,"./flamechart-view-container":62,"../lib/typed-redux":36,"../store/getters":38,"../store/flamechart-view-state":98,"./flamechart-wrapper":167}],138:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.CalleeFlamegraphView=void 0;var e=require("../lib/utils"),r=require("../lib/flamechart"),t=require("./flamechart-view-container"),a=require("../lib/typed-redux"),l=require("../store/getters"),o=require("../store/flamechart-view-state"),i=require("./flamechart-wrapper");const c=(0,e.memoizeByShallowEquality)(({profile:e,frame:r,flattenRecursion:t})=>{let a=e.getProfileForCalleesOf(r);return t?a.getProfileWithRecursionFlattened():a}),n=(0,e.memoizeByShallowEquality)(({calleeProfile:e,getColorBucketForFrame:t})=>new r.Flamechart({getTotalWeight:e.getTotalNonIdleWeight.bind(e),forEachCall:e.forEachCallGrouped.bind(e),formatValue:e.formatValue.bind(e),getColorBucketForFrame:t})),s=(0,t.createMemoizedFlamechartRenderer)(),m=exports.CalleeFlamegraphView=(0,a.createContainer)(i.FlamechartWrapper,(e,r,a)=>{const{activeProfileState:i}=a,{index:m,profile:f,sandwichViewState:u}=i,{flattenRecursion:h,glCanvas:C}=e;if(!f)throw new Error("profile missing");if(!C)throw new Error("glCanvas missing");const{callerCallee:F}=u;if(!F)throw new Error("callerCallee missing");const{selectedFrame:g}=F,d=(0,l.getFrameToColorBucket)(f),p=(0,l.createGetColorBucketForFrame)(d),w=(0,l.createGetCSSColorForFrame)(d),v=(0,l.getCanvasContext)(C),S=n({calleeProfile:c({profile:f,frame:g,flattenRecursion:h}),getColorBucketForFrame:p}),q=s({canvasContext:v,flamechart:S});return Object.assign({renderInverted:!1,flamechart:S,flamechartRenderer:q,canvasContext:v,getCSSColorForFrame:w},(0,t.createFlamechartSetters)(r,o.FlamechartID.SANDWICH_CALLEES,m),{setSelectedNode:()=>{}},F.calleeFlamegraph)}); +},{"../lib/utils":70,"../lib/flamechart":142,"./flamechart-view-container":62,"../lib/typed-redux":36,"../store/getters":38,"../store/flamechart-view-state":98,"./flamechart-wrapper":167}],60:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.SandwichViewContainer=void 0;var e=require("aphrodite"),t=require("./profile-table-view"),a=require("preact"),l=require("./style"),r=require("../store/actions"),s=require("../lib/typed-redux"),i=require("./inverted-caller-flamegraph-view"),o=require("./callee-flamegraph-view");class n extends s.StatelessComponent{constructor(){super(...arguments),this.setSelectedFrame=(e=>{this.props.setSelectedFrame(e)}),this.onWindowKeyPress=(e=>{"Escape"===e.key&&this.setSelectedFrame(null)})}componentDidMount(){window.addEventListener("keydown",this.onWindowKeyPress)}componentWillUnmount(){window.removeEventListener("keydown",this.onWindowKeyPress)}render(){const{selectedFrame:r}=this.props;let s=null;return 
r&&(s=(0,a.h)("div",{className:(0,e.css)(l.commonStyle.fillY,c.callersAndCallees,l.commonStyle.vbox)},(0,a.h)("div",{className:(0,e.css)(l.commonStyle.hbox,c.panZoomViewWraper)},(0,a.h)("div",{className:(0,e.css)(c.flamechartLabelParent)},(0,a.h)("div",{className:(0,e.css)(c.flamechartLabel)},"Callers")),(0,a.h)(i.InvertedCallerFlamegraphView,{glCanvas:this.props.glCanvas,activeProfileState:this.props.activeProfileState})),(0,a.h)("div",{className:(0,e.css)(c.divider)}),(0,a.h)("div",{className:(0,e.css)(l.commonStyle.hbox,c.panZoomViewWraper)},(0,a.h)("div",{className:(0,e.css)(c.flamechartLabelParent,c.flamechartLabelParentBottom)},(0,a.h)("div",{className:(0,e.css)(c.flamechartLabel,c.flamechartLabelBottom)},"Callees")),(0,a.h)(o.CalleeFlamegraphView,{glCanvas:this.props.glCanvas,activeProfileState:this.props.activeProfileState})))),(0,a.h)("div",{className:(0,e.css)(l.commonStyle.hbox,l.commonStyle.fillY)},(0,a.h)("div",{className:(0,e.css)(c.tableView)},(0,a.h)(t.ProfileTableViewContainer,{activeProfileState:this.props.activeProfileState})),s)}}const c=e.StyleSheet.create({tableView:{flex:1},panZoomViewWraper:{flex:1},flamechartLabelParent:{display:"flex",flexDirection:"column",justifyContent:"flex-end",alignItems:"flex-start",fontSize:l.FontSize.TITLE,width:1.2*l.FontSize.TITLE,borderRight:`1px solid ${l.Colors.LIGHT_GRAY}`},flamechartLabelParentBottom:{justifyContent:"flex-start"},flamechartLabel:{transform:"rotate(-90deg)",transformOrigin:"50% 50% 0",width:1.2*l.FontSize.TITLE,flexShrink:1},flamechartLabelBottom:{transform:"rotate(-90deg)",display:"flex",justifyContent:"flex-end"},callersAndCallees:{flex:1,borderLeft:`${l.Sizes.SEPARATOR_HEIGHT}px solid ${l.Colors.LIGHT_GRAY}`},divider:{height:2,background:l.Colors.LIGHT_GRAY}}),d=exports.SandwichViewContainer=(0,s.createContainer)(n,(e,t,a)=>{const{activeProfileState:l,glCanvas:s}=a,{sandwichViewState:i,index:o}=l,{callerCallee:n}=i;return{activeProfileState:l,glCanvas:s,setSelectedFrame:e=>{t(r.actions.sandwichView.setSelectedFrame({profileIndex:o,args:e}))},selectedFrame:n?n.selectedFrame:null,profileIndex:o}}); +},{"aphrodite":68,"./profile-table-view":55,"preact":24,"./style":79,"../store/actions":40,"../lib/typed-redux":36,"./inverted-caller-flamegraph-view":137,"./callee-flamegraph-view":138}],140:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.ByteFormatter=exports.TimeFormatter=exports.RawValueFormatter=void 0;var t=require("./utils");class e{constructor(){this.unit="none"}format(t){return t.toLocaleString()}}exports.RawValueFormatter=e;class r{constructor(t){this.unit=t,this.multiplier="nanoseconds"===t?1e-9:"microseconds"===t?1e-6:"milliseconds"===t?.001:1}formatUnsigned(e){const r=e*this.multiplier;if(r/60>=1){const e=Math.floor(r/60),o=Math.floor(r-60*e).toString();return`${e}:${(0,t.zeroPad)(o,2)}`}return r/1>=1?`${r.toFixed(2)}s`:r/.001>=1?`${(r/.001).toFixed(2)}ms`:r/1e-6>=1?`${(r/1e-6).toFixed(2)}µs`:`${(r/1e-9).toFixed(2)}ns`}format(t){return`${t<0?"-":""}${this.formatUnsigned(Math.abs(t))}`}}exports.TimeFormatter=r;class o{constructor(){this.unit="bytes"}format(t){return t<1024?`${t.toFixed(0)} B`:(t/=1024)<1024?`${t.toFixed(2)} KB`:(t/=1024)<1024?`${t.toFixed(2)} MB`:`${(t/=1024).toFixed(2)} GB`}}exports.ByteFormatter=o; +},{"./utils":70}],145:[function(require,module,exports) { +var t=null;function r(){return t||(t=e()),t}function e(){try{throw new Error}catch(r){var t=(""+r.stack).match(/(https?|file|ftp):\/\/[^)\n]+/g);if(t)return 
n(t[0])}return"/"}function n(t){return(""+t).replace(/^((?:https?|file|ftp):\/\/.+)\/[^\/]+$/,"$1")+"/"}exports.getBundleURL=r,exports.getBaseURL=n; +},{}],66:[function(require,module,exports) { +var r=require("./bundle-url").getBundleURL;function e(r){Array.isArray(r)||(r=[r]);var e=r[r.length-1];try{return Promise.resolve(require(e))}catch(n){if("MODULE_NOT_FOUND"===n.code)return new u(function(n,i){t(r.slice(0,-1)).then(function(){return require(e)}).then(n,i)});throw n}}function t(r){return Promise.all(r.map(s))}var n={};function i(r,e){n[r]=e}module.exports=exports=e,exports.load=t,exports.register=i;var o={};function s(e){var t;if(Array.isArray(e)&&(t=e[1],e=e[0]),o[e])return o[e];var i=(e.substring(e.lastIndexOf(".")+1,e.length)||e).toLowerCase(),s=n[i];return s?o[e]=s(r()+e).then(function(r){return r&&module.bundle.register(t,r),r}):void 0}function u(r){this.executor=r,this.promise=null}u.prototype.then=function(r,e){return null===this.promise&&(this.promise=new Promise(this.executor)),this.promise.then(r,e)},u.prototype.catch=function(r){return null===this.promise&&(this.promise=new Promise(this.executor)),this.promise.catch(r)}; +},{"./bundle-url":145}],139:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.CallTreeProfileBuilder=exports.StackListProfileBuilder=exports.Profile=exports.CallTreeNode=exports.Frame=exports.HasWeights=void 0;var e=require("./utils"),t=require("./value-formatters"),s=function(e,t,s,r){return new(s||(s=Promise))(function(a,i){function l(e){try{h(r.next(e))}catch(e){i(e)}}function o(e){try{h(r.throw(e))}catch(e){i(e)}}function h(e){e.done?a(e.value):new s(function(t){t(e.value)}).then(l,o)}h((r=r.apply(e,t||[])).next())})};const r=require("_bundle_loader")(require.resolve("./demangle-cpp"));r.then(()=>{});class a{constructor(){this.selfWeight=0,this.totalWeight=0}getSelfWeight(){return this.selfWeight}getTotalWeight(){return this.totalWeight}addToTotalWeight(e){this.totalWeight+=e}addToSelfWeight(e){this.selfWeight+=e}overwriteWeightWith(e){this.selfWeight=e.selfWeight,this.totalWeight=e.totalWeight}}exports.HasWeights=a;class i extends a{constructor(e){super(),this.key=e.key,this.name=e.name,this.file=e.file,this.line=e.line,this.col=e.col}static getOrInsert(e,t){return e.getOrInsert(new i(t))}}exports.Frame=i,i.root=new i({key:"(speedscope root)",name:"(speedscope root)"});class l extends a{constructor(e,t){super(),this.frame=e,this.parent=t,this.children=[],this.frozen=!1}isRoot(){return this.frame===i.root}isFrozen(){return this.frozen}freeze(){this.frozen=!0}}exports.CallTreeNode=l;class o{constructor(s=0){this.name="",this.frames=new e.KeyedSet,this.appendOrderCalltreeRoot=new l(i.root,null),this.groupedCalltreeRoot=new l(i.root,null),this.samples=[],this.weights=[],this.valueFormatter=new t.RawValueFormatter,this.totalNonIdleWeight=null,this.totalWeight=s}getAppendOrderCalltreeRoot(){return this.appendOrderCalltreeRoot}getGroupedCalltreeRoot(){return this.groupedCalltreeRoot}formatValue(e){return this.valueFormatter.format(e)}setValueFormatter(e){this.valueFormatter=e}getWeightUnit(){return this.valueFormatter.unit}getName(){return this.name}setName(e){this.name=e}getTotalWeight(){return this.totalWeight}getTotalNonIdleWeight(){return null===this.totalNonIdleWeight&&(this.totalNonIdleWeight=this.groupedCalltreeRoot.children.reduce((e,t)=>e+t.getTotalWeight(),0)),this.totalNonIdleWeight}forEachCallGrouped(e,t){!function s(r,a){r.frame!==i.root&&e(r,a);let l=0;const 
o=[...r.children];o.sort((e,t)=>e.getTotalWeight()>t.getTotalWeight()?-1:1),o.forEach(function(e){s(e,a+l),l+=e.getTotalWeight()}),r.frame!==i.root&&t(r,a+r.getTotalWeight())}(this.groupedCalltreeRoot,0)}forEachCall(t,s){let r=[],a=0,l=0;for(let o of this.samples){let h=null;for(h=o;h&&h.frame!=i.root&&-1===r.indexOf(h);h=h.parent);for(;r.length>0&&(0,e.lastOf)(r)!=h;){s(r.pop(),a)}const n=[];for(let e=o;e&&e.frame!=i.root&&e!=h;e=e.parent)n.push(e);n.reverse();for(let e of n)t(e,a);r=r.concat(n),a+=this.weights[l++]}for(let e=r.length-1;e>=0;e--)s(r[e],a)}forEachFrame(e){this.frames.forEach(e)}forEachSample(e){for(let t=0;t{r.frames.getOrInsert(e).overwriteWeightWith(e)}),r}getInvertedProfileForCallersOf(e){const t=i.getOrInsert(this.frames,e),s=new h,r=[];!function e(s){if(s.frame===t)r.push(s);else for(let t of s.children)e(t)}(this.appendOrderCalltreeRoot);for(let e of r){const t=[];for(let s=e;null!=s&&s.frame!==i.root;s=s.parent)t.push(s.frame);s.appendSampleWithWeight(t,e.getTotalWeight())}const a=s.build();return a.name=this.name,a.valueFormatter=this.valueFormatter,a}getProfileForCalleesOf(e){const t=i.getOrInsert(this.frames,e),s=new h;!function e(r){if(r.frame===t)!function(e){const t=[];!function e(r){t.push(r.frame),s.appendSampleWithWeight(t,r.getSelfWeight());for(let t of r.children)e(t);t.pop()}(e)}(r);else for(let t of r.children)e(t)}(this.appendOrderCalltreeRoot);const r=s.build();return r.name=this.name,r.valueFormatter=this.valueFormatter,r}demangle(){return s(this,void 0,void 0,function*(){let e=null;for(let t of this.frames)t.name.startsWith("__Z")&&(e||(e=(yield r).demangleCpp),t.name=e(t.name))})}remapNames(e){for(let t of this.frames)t.name=e(t.name)}}exports.Profile=o;class h extends o{constructor(){super(...arguments),this.pendingSample=null}_appendSample(t,s,r){if(isNaN(s))throw new Error("invalid weight");let a=r?this.appendOrderCalltreeRoot:this.groupedCalltreeRoot,o=new Set;for(let h of t){const t=i.getOrInsert(this.frames,h),n=r?(0,e.lastOf)(a.children):a.children.find(e=>e.frame===t);if(n&&!n.isFrozen()&&n.frame==t)a=n;else{const e=a;a=new l(t,a),e.children.push(a)}a.addToTotalWeight(s),o.add(a.frame)}if(a.addToSelfWeight(s),r)for(let e of a.children)e.freeze();if(r){a.frame.addToSelfWeight(s);for(let e of o)e.addToTotalWeight(s);a===(0,e.lastOf)(this.samples)?this.weights[this.weights.length-1]+=s:(this.samples.push(a),this.weights.push(s))}}appendSampleWithWeight(e,t){if(0!==t){if(t<0)throw new Error("Samples must have positive weights");this._appendSample(e,t,!0),this._appendSample(e,t,!1)}}appendSampleWithTimestamp(e,t){if(this.pendingSample){if(t0?this.appendSampleWithWeight(this.pendingSample.stack,this.pendingSample.centralTimestamp-this.pendingSample.startTimestamp):(this.appendSampleWithWeight(this.pendingSample.stack,1),this.setValueFormatter(new t.RawValueFormatter))),this.totalWeight=Math.max(this.totalWeight,this.weights.reduce((e,t)=>e+t,0)),this}}exports.StackListProfileBuilder=h;class n extends o{constructor(){super(...arguments),this.appendOrderStack=[this.appendOrderCalltreeRoot],this.groupedOrderStack=[this.groupedCalltreeRoot],this.framesInStack=new Map,this.stack=[],this.lastValue=0}addWeightsToFrames(t){const s=t-this.lastValue;for(let e of this.framesInStack.keys())e.addToTotalWeight(s);const r=(0,e.lastOf)(this.stack);r&&r.addToSelfWeight(s)}addWeightsToNodes(t,s){const r=t-this.lastValue;for(let e of s)e.addToTotalWeight(r);const a=(0,e.lastOf)(s);a&&a.addToSelfWeight(r)}_enterFrame(t,s,r){let 
a=r?this.appendOrderStack:this.groupedOrderStack;this.addWeightsToNodes(s,a);let i=(0,e.lastOf)(a);if(i){if(r){const e=s-this.lastValue;if(e>0)this.samples.push(i),this.weights.push(s-this.lastValue);else if(e<0)throw new Error(`Samples must be provided in increasing order of cumulative value. Last sample was ${this.lastValue}, this sample was ${s}`)}const o=r?(0,e.lastOf)(i.children):i.children.find(e=>e.frame===t);let h;o&&!o.isFrozen()&&o.frame==t?h=o:(h=new l(t,i),i.children.push(h)),a.push(h)}}enterFrame(e,t){const s=i.getOrInsert(this.frames,e);this.addWeightsToFrames(t),this._enterFrame(s,t,!0),this._enterFrame(s,t,!1),this.stack.push(s);const r=this.framesInStack.get(s)||0;this.framesInStack.set(s,r+1),this.lastValue=t}_leaveFrame(e,t,s){let r=s?this.appendOrderStack:this.groupedOrderStack;if(this.addWeightsToNodes(t,r),s){const s=this.appendOrderStack.pop();if(null==s)throw new Error(`Trying to leave ${e.key} when stack is empty`);if(null==this.lastValue)throw new Error(`Trying to leave a ${e.key} before any have been entered`);s.freeze();const r=t-this.lastValue;if(r>0)this.samples.push(s),this.weights.push(t-this.lastValue);else if(r<0)throw new Error(`Samples must be provided in increasing order of cumulative value. Last sample was ${this.lastValue}, this sample was ${t}`)}else this.groupedOrderStack.pop()}leaveFrame(e,t){const s=i.getOrInsert(this.frames,e);this.addWeightsToFrames(t),this._leaveFrame(s,t,!0),this._leaveFrame(s,t,!1),this.stack.pop();const r=this.framesInStack.get(s);null!=r&&(1===r?this.framesInStack.delete(s):this.framesInStack.set(s,r-1),this.lastValue=t,this.totalWeight=Math.max(this.totalWeight,this.lastValue))}build(){if(this.appendOrderStack.length>1||this.groupedOrderStack.length>1)throw new Error("Tried to complete profile construction with a non-empty stack");return this}}exports.CallTreeProfileBuilder=n; +},{"./utils":70,"./value-formatters":140,"_bundle_loader":66,"./demangle-cpp":[["demangle-cpp.8a387750.js",180],"demangle-cpp.8a387750.map",180]}],141:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0});var e=exports.FileFormat=void 0;!function(e){let t,o;!function(e){e.EVENTED="evented",e.SAMPLED="sampled"}(t=e.ProfileType||(e.ProfileType={})),function(e){e.OPEN_FRAME="O",e.CLOSE_FRAME="C"}(o=e.EventType||(e.EventType={}))}(e||(exports.FileFormat=e={})); +},{}],19:[function(require,module,exports) { +module.exports={name:"speedscope",version:"1.5.3",description:"",repository:"jlfwong/speedscope",main:"index.js",bin:{speedscope:"./bin/cli.js"},scripts:{deploy:"./scripts/deploy.sh",prepack:"./scripts/build-release.sh",prettier:"prettier --write 'src/**/*.ts' 'src/**/*.tsx'",lint:"eslint 'src/**/*.ts' 'src/**/*.tsx'",jest:"./scripts/test-setup.sh && jest --runInBand",coverage:"npm run jest -- --coverage && coveralls < coverage/lcov.info",test:"tsc --noEmit && npm run lint && npm run coverage",serve:"parcel assets/index.html --open --no-autoinstall"},files:["bin/cli.js","dist/release/**","!*.map"],browserslist:["last 2 Chrome versions","last 2 Firefox 
versions"],author:"",license:"MIT",devDependencies:{"@types/jest":"22.2.3","@types/jszip":"3.1.4","@types/node":"10.1.4","@types/pako":"1.0.0",aphrodite:"2.1.0",coveralls:"3.0.1",eslint:"4.19.1","eslint-plugin-prettier":"2.6.0",jest:"24.3.0",jsverify:"0.8.3",jszip:"3.1.5",pako:"1.0.6","parcel-bundler":"1.9.2",preact:"8.2.7","preact-redux":"jlfwong/preact-redux#a56dcc4",prettier:"1.12.0",protobufjs:"6.8.8",quicktype:"15.0.209",redux:"^4.0.0","ts-jest":"24.3.0",typescript:"3.2.4","typescript-eslint-parser":"17.0.1","uglify-es":"3.2.2"},jest:{transform:{"^.+\\.tsx?$":"ts-jest"},testRegex:"\\.test\\.tsx?$",collectCoverageFrom:["**/*.{ts,tsx}","!**/*.d.{ts,tsx}"],moduleFileExtensions:["ts","tsx","js","jsx","json"]},dependencies:{opn:"5.3.0"}}; +},{}],83:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.exportProfileGroup=r,exports.importSpeedscopeProfiles=s,exports.saveToFile=l;var e=require("./profile"),t=require("./value-formatters"),n=require("./file-format-spec");function r(e){const t=[],n=new Map;function r(e){let r=n.get(e);if(null==r){const o={name:e.name};null!=e.file&&(o.file=e.file),null!=e.line&&(o.line=e.line),null!=e.col&&(o.col=e.col),r=t.length,n.set(e,r),t.push(o)}return r}const a={exporter:`speedscope@${require("../../package.json").version}`,name:e.name,activeProfileIndex:e.indexToView,$schema:"https://www.speedscope.app/file-format-schema.json",shared:{frames:t},profiles:[]};for(let t of e.profiles)a.profiles.push(o(t,r));return a}function o(e,t){const r={type:n.FileFormat.ProfileType.EVENTED,name:e.getName(),unit:e.getWeightUnit(),startValue:0,endValue:e.getTotalWeight(),events:[]};return e.forEachCall((e,o)=>{r.events.push({type:n.FileFormat.EventType.OPEN_FRAME,frame:t(e.frame),at:o})},(e,o)=>{r.events.push({type:n.FileFormat.EventType.CLOSE_FRAME,frame:t(e.frame),at:o})}),r}function a(r,o){function a(e){const{name:n,unit:o}=r;switch(o){case"nanoseconds":case"microseconds":case"milliseconds":case"seconds":e.setValueFormatter(new t.TimeFormatter(o));break;case"bytes":e.setValueFormatter(new t.ByteFormatter);break;case"none":e.setValueFormatter(new t.RawValueFormatter)}e.setName(n)}switch(r.type){case n.FileFormat.ProfileType.EVENTED:return function(t){const{startValue:r,endValue:s,events:l}=t,i=new e.CallTreeProfileBuilder(s-r);a(i);const c=o.map((e,t)=>Object.assign({key:t},e));for(let e of l)switch(e.type){case n.FileFormat.EventType.OPEN_FRAME:i.enterFrame(c[e.frame],e.at-r);break;case n.FileFormat.EventType.CLOSE_FRAME:i.leaveFrame(c[e.frame],e.at-r)}return i.build()}(r);case n.FileFormat.ProfileType.SAMPLED:return function(t){const{startValue:n,endValue:r,samples:s,weights:l}=t,i=new e.StackListProfileBuilder(r-n);a(i);const c=o.map((e,t)=>Object.assign({key:t},e));if(s.length!==l.length)throw new Error(`Expected samples.length (${s.length}) to equal weights.length (${l.length})`);for(let e=0;ec[e]),n)}return i.build()}(r)}}function s(e){return{name:e.name||e.profiles[0].name||"profile",indexToView:e.activeProfileIndex||0,profiles:e.profiles.map(t=>a(t,e.shared.frames))}}function l(e){const t=r(e),n=new Blob([JSON.stringify(t)],{type:"text/json"}),o=`${(t.name?t.name.split(".")[0]:"profile").replace(/\W+/g,"_")}.speedscope.json`;console.log("Saving",o);const a=document.createElement("a");a.download=o,a.href=window.URL.createObjectURL(n),a.dataset.downloadurl=["text/json",a.download,a.href].join(":"),document.body.appendChild(a),a.click(),document.body.removeChild(a)} 
+},{"./profile":139,"./value-formatters":140,"./file-format-spec":141,"../../package.json":19}],64:[function(require,module,exports) { +module.exports="perf-vertx-stacks-01-collapsed-all.3e0a632c.txt"; +},{}],34:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.Application=exports.GLCanvas=exports.Toolbar=void 0;var e=require("preact"),t=require("aphrodite"),o=require("./style"),i=require("../lib/emscripten"),s=require("./sandwich-view"),r=require("../lib/file-format"),n=require("../store"),a=require("../lib/typed-redux"),l=require("./flamechart-view-container"),c=require("../gl/graphics"),d=function(e,t,o,i){return new(o||(o=Promise))(function(s,r){function n(e){try{l(i.next(e))}catch(e){r(e)}}function a(e){try{l(i.throw(e))}catch(e){r(e)}}function l(e){e.done?s(e.value):new o(function(t){t(e.value)}).then(n,a)}l((i=i.apply(e,t||[])).next())})};const p=require("_bundle_loader")(require.resolve("../import"));function h(e,t){return d(this,void 0,void 0,function*(){return(yield p).importProfileGroupFromText(e,t)})}function f(e,t){return d(this,void 0,void 0,function*(){return(yield p).importProfileGroupFromBase64(e,t)})}function m(e,t){return d(this,void 0,void 0,function*(){return(yield p).importProfilesFromArrayBuffer(e,t)})}function u(e){return d(this,void 0,void 0,function*(){return(yield p).importProfilesFromFile(e)})}function v(e){return d(this,void 0,void 0,function*(){return(yield p).importFromFileSystemDirectoryEntry(e)})}p.then(()=>{});const g=require("../../sample/profiles/stackcollapse/perf-vertx-stacks-01-collapsed-all.txt");class w extends a.StatelessComponent{constructor(){super(...arguments),this.setTimeOrder=(()=>{this.props.setViewMode(n.ViewMode.CHRONO_FLAME_CHART)}),this.setLeftHeavyOrder=(()=>{this.props.setViewMode(n.ViewMode.LEFT_HEAVY_FLAME_GRAPH)}),this.setSandwichView=(()=>{this.props.setViewMode(n.ViewMode.SANDWICH_VIEW)})}renderLeftContent(){return this.props.activeProfileState?(0,e.h)("div",{className:(0,t.css)(y.toolbarLeft)},(0,e.h)("div",{className:(0,t.css)(y.toolbarTab,this.props.viewMode===n.ViewMode.CHRONO_FLAME_CHART&&y.toolbarTabActive),onClick:this.setTimeOrder},(0,e.h)("span",{className:(0,t.css)(y.emoji)},"🕰"),"Time Order"),(0,e.h)("div",{className:(0,t.css)(y.toolbarTab,this.props.viewMode===n.ViewMode.LEFT_HEAVY_FLAME_GRAPH&&y.toolbarTabActive),onClick:this.setLeftHeavyOrder},(0,e.h)("span",{className:(0,t.css)(y.emoji)},"⬅️"),"Left Heavy"),(0,e.h)("div",{className:(0,t.css)(y.toolbarTab,this.props.viewMode===n.ViewMode.SANDWICH_VIEW&&y.toolbarTabActive),onClick:this.setSandwichView},(0,e.h)("span",{className:(0,t.css)(y.emoji)},"🥪"),"Sandwich")):null}renderCenterContent(){const{activeProfileState:o,profileGroup:i}=this.props;if(o&&i){const{index:r}=o;if(1===i.profiles.length)return o.profile.getName();{function s(o,i,s){return(0,e.h)("button",{disabled:i,onClick:s,className:(0,t.css)(y.emoji,y.toolbarProfileNavButton,i&&y.toolbarProfileNavButtonDisabled)},o)}const n=s("⬅️",0===r,()=>this.props.setProfileIndexToView(r-1)),a=s("➡️",r>=i.profiles.length-1,()=>this.props.setProfileIndexToView(r+1));return(0,e.h)("div",{className:(0,t.css)(y.toolbarCenter)},n,o.profile.getName()," ",(0,e.h)("span",{className:(0,t.css)(y.toolbarProfileIndex)},"(",o.index+1,"/",i.profiles.length,")"),a)}}return"🔬speedscope"}renderRightContent(){const 
o=(0,e.h)("div",{className:(0,t.css)(y.toolbarTab),onClick:this.props.browseForFile},(0,e.h)("span",{className:(0,t.css)(y.emoji)},"⤵️"),"Import"),i=(0,e.h)("div",{className:(0,t.css)(y.toolbarTab)},(0,e.h)("a",{href:"https://github.com/jlfwong/speedscope#usage",className:(0,t.css)(y.noLinkStyle),target:"_blank"},(0,e.h)("span",{className:(0,t.css)(y.emoji)},"❓"),"Help"));return(0,e.h)("div",{className:(0,t.css)(y.toolbarRight)},this.props.activeProfileState&&(0,e.h)("div",{className:(0,t.css)(y.toolbarTab),onClick:this.props.saveFile},(0,e.h)("span",{className:(0,t.css)(y.emoji)},"⤴️"),"Export"),o,i)}render(){return(0,e.h)("div",{className:(0,t.css)(y.toolbar)},this.renderLeftContent(),this.renderCenterContent(),this.renderRightContent())}}exports.Toolbar=w;class b extends e.Component{constructor(){super(...arguments),this.canvas=null,this.ref=(e=>{e instanceof HTMLCanvasElement?this.canvas=e:this.canvas=null,this.props.setGLCanvas(this.canvas)}),this.container=null,this.containerRef=(e=>{e instanceof HTMLElement?this.container=e:this.container=null}),this.maybeResize=(()=>{if(!this.container)return;if(!this.props.canvasContext)return;let{width:e,height:t}=this.container.getBoundingClientRect();const o=e,i=t,s=e*window.devicePixelRatio,r=t*window.devicePixelRatio;this.props.canvasContext.gl.resize(s,r,o,i),this.props.canvasContext.gl.clear(new c.Graphics.Color(1,1,1,1))}),this.onWindowResize=(()=>{this.props.canvasContext&&this.props.canvasContext.requestFrame()})}componentWillReceiveProps(e){this.props.canvasContext!==e.canvasContext&&(this.props.canvasContext&&this.props.canvasContext.removeBeforeFrameHandler(this.maybeResize),e.canvasContext&&(e.canvasContext.addBeforeFrameHandler(this.maybeResize),e.canvasContext.requestFrame()))}componentDidMount(){window.addEventListener("resize",this.onWindowResize)}componentWillUnmount(){this.props.canvasContext&&this.props.canvasContext.removeBeforeFrameHandler(this.maybeResize),window.removeEventListener("resize",this.onWindowResize)}render(){return(0,e.h)("div",{ref:this.containerRef,className:(0,t.css)(y.glCanvasView)},(0,e.h)("canvas",{ref:this.ref,width:1,height:1}))}}exports.GLCanvas=b;class C extends a.StatelessComponent{constructor(){super(...arguments),this.loadExample=(()=>{this.loadProfile(()=>d(this,void 0,void 0,function*(){return yield h("perf-vertx-stacks-01-collapsed-all.txt",yield fetch(g).then(e=>e.text()))}))}),this.onDrop=(e=>{if(this.props.setDragActive(!1),e.preventDefault(),!e.dataTransfer)return;const t=e.dataTransfer.items[0];if("webkitGetAsEntry"in t){const e=t.webkitGetAsEntry();if(e.isDirectory&&e.name.endsWith(".trace"))return console.log("Importing as Instruments.app .trace file"),void this.loadProfile(()=>d(this,void 0,void 0,function*(){return yield v(e)}))}let o=e.dataTransfer.files.item(0);o&&this.loadFromFile(o)}),this.onDragOver=(e=>{this.props.setDragActive(!0),e.preventDefault()}),this.onDragLeave=(e=>{this.props.setDragActive(!1),e.preventDefault()}),this.onWindowKeyPress=(e=>d(this,void 0,void 0,function*(){if("1"===e.key)this.props.setViewMode(n.ViewMode.CHRONO_FLAME_CHART);else if("2"===e.key)this.props.setViewMode(n.ViewMode.LEFT_HEAVY_FLAME_GRAPH);else if("3"===e.key)this.props.setViewMode(n.ViewMode.SANDWICH_VIEW);else if("r"===e.key){const{flattenRecursion:e}=this.props;this.props.setFlattenRecursion(!e)}else if("n"===e.key){const{activeProfileState:e}=this.props;e&&this.props.setProfileIndexToView(e.index+1)}else 
if("p"===e.key){const{activeProfileState:e}=this.props;e&&this.props.setProfileIndexToView(e.index-1)}})),this.saveFile=(()=>{if(this.props.profileGroup){const{name:e,indexToView:t,profiles:o}=this.props.profileGroup,i={name:e,indexToView:t,profiles:o.map(e=>e.profile)};(0,r.saveToFile)(i)}}),this.browseForFile=(()=>{const e=document.createElement("input");e.type="file",e.addEventListener("change",this.onFileSelect),e.click()}),this.onWindowKeyDown=(e=>d(this,void 0,void 0,function*(){"s"===e.key&&(e.ctrlKey||e.metaKey)?(e.preventDefault(),this.saveFile()):"o"===e.key&&(e.ctrlKey||e.metaKey)&&(e.preventDefault(),this.browseForFile())})),this.onDocumentPaste=(e=>{e.preventDefault(),e.stopPropagation();const t=e.clipboardData;if(!t)return;const o=t.getData("text");this.loadProfile(()=>d(this,void 0,void 0,function*(){return yield h("From Clipboard",o)}))}),this.onFileSelect=(e=>{const t=e.target.files.item(0);t&&this.loadFromFile(t)})}loadProfile(e){return d(this,void 0,void 0,function*(){if(this.props.setLoading(!0),yield new Promise(e=>setTimeout(e,0)),!this.props.glCanvas)return;console.time("import");let t=null;try{t=yield e()}catch(e){return console.log("Failed to load format",e),void this.props.setError(!0)}if(null==t)return alert("Unrecognized format! See documentation about supported formats."),void this.props.setLoading(!1);if(0===t.profiles.length)return alert("Successfully imported profile, but it's empty!"),void this.props.setLoading(!1);this.props.hashParams.title&&(t=Object.assign({name:this.props.hashParams.title},t)),document.title=`${t.name} - speedscope`;for(let e of t.profiles)yield e.demangle();for(let e of t.profiles){const t=this.props.hashParams.title||e.getName();e.setName(t)}console.timeEnd("import"),this.props.setProfileGroup(t),this.props.setLoading(!1)})}loadFromFile(e){this.loadProfile(()=>d(this,void 0,void 0,function*(){const t=yield u(e);if(t){for(let o of t.profiles)o.getName()||o.setName(e.name);return t}if(this.props.profileGroup&&this.props.activeProfileState){const t=new FileReader,o=new Promise(e=>{t.addEventListener("loadend",()=>{if("string"!=typeof t.result)throw new Error("Expected reader.result to be a string");e(t.result)})});t.readAsText(e);const s=yield o,r=(0,i.importEmscriptenSymbolMap)(s);if(r){const{profile:e,index:t}=this.props.activeProfileState;return console.log("Importing as emscripten symbol map"),e.remapNames(e=>r.get(e)||e),{name:this.props.profileGroup.name||"profile",indexToView:t,profiles:[e]}}}return null}))}componentDidMount(){window.addEventListener("keydown",this.onWindowKeyDown),window.addEventListener("keypress",this.onWindowKeyPress),document.addEventListener("paste",this.onDocumentPaste),this.maybeLoadHashParamProfile()}componentWillUnmount(){window.removeEventListener("keydown",this.onWindowKeyDown),window.removeEventListener("keypress",this.onWindowKeyPress),document.removeEventListener("paste",this.onDocumentPaste)}maybeLoadHashParamProfile(){return d(this,void 0,void 0,function*(){if(this.props.hashParams.profileURL){if(!n.canUseXHR)return void alert(`Cannot load a profile URL when loading from "${window.location.protocol}" URL protocol`);this.loadProfile(()=>d(this,void 0,void 0,function*(){const e=yield fetch(this.props.hashParams.profileURL);let t=new URL(this.props.hashParams.profileURL).pathname;return t.includes("/")&&(t=t.slice(t.lastIndexOf("/")+1)),yield m(t,yield e.arrayBuffer())}))}else 
if(this.props.hashParams.localProfilePath){window.speedscope={loadFileFromBase64:(e,t)=>{this.loadProfile(()=>f(e,t))}};const e=document.createElement("script");e.src=`file:///${this.props.hashParams.localProfilePath}`,document.head.appendChild(e)}})}renderLanding(){return(0,e.h)("div",{className:(0,t.css)(y.landingContainer)},(0,e.h)("div",{className:(0,t.css)(y.landingMessage)},(0,e.h)("p",{className:(0,t.css)(y.landingP)},"👋 Hi there! Welcome to 🔬speedscope, an interactive"," ",(0,e.h)("a",{className:(0,t.css)(y.link),href:"http://www.brendangregg.com/FlameGraphs/cpuflamegraphs.html"},"flamegraph")," ","visualizer. Use it to help you make your software faster."),n.canUseXHR?(0,e.h)("p",{className:(0,t.css)(y.landingP)},"Drag and drop a profile file onto this window to get started, click the big blue button below to browse for a profile to explore, or"," ",(0,e.h)("a",{tabIndex:0,className:(0,t.css)(y.link),onClick:this.loadExample},"click here")," ","to load an example profile."):(0,e.h)("p",{className:(0,t.css)(y.landingP)},"Drag and drop a profile file onto this window to get started, or click the big blue button below to browse for a profile to explore."),(0,e.h)("div",{className:(0,t.css)(y.browseButtonContainer)},(0,e.h)("input",{type:"file",name:"file",id:"file",onChange:this.onFileSelect,className:(0,t.css)(y.hide)}),(0,e.h)("label",{for:"file",className:(0,t.css)(y.browseButton),tabIndex:0},"Browse")),(0,e.h)("p",{className:(0,t.css)(y.landingP)},"See the"," ",(0,e.h)("a",{className:(0,t.css)(y.link),href:"https://github.com/jlfwong/speedscope#usage",target:"_blank"},"documentation")," ","for information about supported file formats, keyboard shortcuts, and how to navigate around the profile."),(0,e.h)("p",{className:(0,t.css)(y.landingP)},"speedscope is open source. 
Please"," ",(0,e.h)("a",{className:(0,t.css)(y.link),target:"_blank",href:"https://github.com/jlfwong/speedscope/issues"},"report any issues on GitHub"),".")))}renderError(){return(0,e.h)("div",{className:(0,t.css)(y.error)},(0,e.h)("div",null,"😿 Something went wrong."),(0,e.h)("div",null,"Check the JS console for more details."))}renderLoadingBar(){return(0,e.h)("div",{className:(0,t.css)(y.loading)})}renderContent(){const{viewMode:t,activeProfileState:o,error:i,loading:r,glCanvas:a}=this.props;if(i)return this.renderError();if(r)return this.renderLoadingBar();if(!o||!a)return this.renderLanding();switch(t){case n.ViewMode.CHRONO_FLAME_CHART:return(0,e.h)(l.ChronoFlamechartView,{activeProfileState:o,glCanvas:a});case n.ViewMode.LEFT_HEAVY_FLAME_GRAPH:return(0,e.h)(l.LeftHeavyFlamechartView,{activeProfileState:o,glCanvas:a});case n.ViewMode.SANDWICH_VIEW:return(0,e.h)(s.SandwichViewContainer,{activeProfileState:o,glCanvas:a})}}render(){return(0,e.h)("div",{onDrop:this.onDrop,onDragOver:this.onDragOver,onDragLeave:this.onDragLeave,className:(0,t.css)(y.root,this.props.dragActive&&y.dragTargetRoot)},(0,e.h)(b,{setGLCanvas:this.props.setGLCanvas,canvasContext:this.props.canvasContext}),(0,e.h)(w,Object.assign({saveFile:this.saveFile,browseForFile:this.browseForFile},this.props)),(0,e.h)("div",{className:(0,t.css)(y.contentContainer)},this.renderContent()),this.props.dragActive&&(0,e.h)("div",{className:(0,t.css)(y.dragTarget)}))}}exports.Application=C;const y=t.StyleSheet.create({glCanvasView:{position:"absolute",width:"100vw",height:"100vh",zIndex:-1,pointerEvents:"none"},error:{display:"flex",flexDirection:"column",alignItems:"center",justifyContent:"center",height:"100%"},loading:{height:3,marginBottom:-3,background:o.Colors.DARK_BLUE,transformOrigin:"0% 50%",animationName:[{from:{transform:"scaleX(0)"},to:{transform:"scaleX(1)"}}],animationTimingFunction:"cubic-bezier(0, 1, 0, 1)",animationDuration:"30s"},root:{width:"100vw",height:"100vh",overflow:"hidden",display:"flex",flexDirection:"column",position:"relative",fontFamily:o.FontFamily.MONOSPACE,lineHeight:"20px"},dragTargetRoot:{cursor:"copy"},dragTarget:{boxSizing:"border-box",position:"absolute",top:0,left:0,width:"100%",height:"100%",border:`5px dashed ${o.Colors.DARK_BLUE}`,pointerEvents:"none"},contentContainer:{position:"relative",display:"flex",overflow:"hidden",flexDirection:"column",flex:1},landingContainer:{display:"flex",alignItems:"center",justifyContent:"center",flex:1},landingMessage:{maxWidth:600},landingP:{marginBottom:16},hide:{display:"none"},browseButtonContainer:{display:"flex",alignItems:"center",justifyContent:"center"},browseButton:{marginBottom:16,height:72,flex:1,maxWidth:256,textAlign:"center",fontSize:o.FontSize.BIG_BUTTON,lineHeight:"72px",background:o.Colors.DARK_BLUE,color:o.Colors.WHITE,transition:`all ${o.Duration.HOVER_CHANGE} 
ease-in`,":hover":{background:o.Colors.BRIGHT_BLUE}},link:{color:o.Colors.BRIGHT_BLUE,cursor:"pointer",textDecoration:"none"},toolbar:{height:o.Sizes.TOOLBAR_HEIGHT,flexShrink:0,background:o.Colors.BLACK,color:o.Colors.WHITE,textAlign:"center",fontFamily:o.FontFamily.MONOSPACE,fontSize:o.FontSize.TITLE,lineHeight:`${o.Sizes.TOOLBAR_TAB_HEIGHT}px`,userSelect:"none"},toolbarLeft:{position:"absolute",height:o.Sizes.TOOLBAR_HEIGHT,overflow:"hidden",top:0,left:0,marginRight:2,textAlign:"left"},toolbarCenter:{paddingTop:1,height:o.Sizes.TOOLBAR_HEIGHT},toolbarRight:{height:o.Sizes.TOOLBAR_HEIGHT,overflow:"hidden",position:"absolute",top:0,right:0,marginRight:2,textAlign:"right"},toolbarProfileIndex:{color:o.Colors.LIGHT_GRAY},toolbarProfileNavButton:{opacity:.8,fontSize:o.FontSize.TITLE,lineHeight:`${o.Sizes.TOOLBAR_TAB_HEIGHT}px`,":hover":{opacity:1},background:"none",border:"none",padding:0,marginLeft:"0.3em",marginRight:"0.3em",transition:`all ${o.Duration.HOVER_CHANGE} ease-in`},toolbarProfileNavButtonDisabled:{opacity:.5,":hover":{opacity:.5}},toolbarTab:{background:o.Colors.DARK_GRAY,marginTop:o.Sizes.SEPARATOR_HEIGHT,height:o.Sizes.TOOLBAR_TAB_HEIGHT,lineHeight:`${o.Sizes.TOOLBAR_TAB_HEIGHT}px`,paddingLeft:2,paddingRight:8,display:"inline-block",marginLeft:2,transition:`all ${o.Duration.HOVER_CHANGE} ease-in`,":hover":{background:o.Colors.GRAY}},toolbarTabActive:{background:o.Colors.BRIGHT_BLUE,":hover":{background:o.Colors.BRIGHT_BLUE}},noLinkStyle:{textDecoration:"none",color:"inherit"},emoji:{display:"inline-block",verticalAlign:"middle",paddingTop:"0px",marginRight:"0.3em"}}); +},{"preact":24,"aphrodite":68,"./style":79,"../lib/emscripten":81,"./sandwich-view":60,"../lib/file-format":83,"../store":29,"../lib/typed-redux":36,"./flamechart-view-container":62,"../gl/graphics":42,"_bundle_loader":66,"../import":[["import.a03c2bef.js",104],"import.a03c2bef.map",104],"../../sample/profiles/stackcollapse/perf-vertx-stacks-01-collapsed-all.txt":64}],21:[function(require,module,exports) { +"use strict";Object.defineProperty(exports,"__esModule",{value:!0}),exports.ApplicationContainer=void 0;var e=require("../lib/typed-redux"),t=require("./application"),i=require("../store/getters"),o=require("../store/actions"),n=require("../gl/graphics");const r=exports.ApplicationContainer=(0,e.createContainer)(t.Application,(t,r)=>{const{flattenRecursion:s,profileGroup:a}=t;let l=null;if(a&&a.profiles.length>a.indexToView){const e=a.indexToView,t=a.profiles[e];l=Object.assign({},a.profiles[a.indexToView],{profile:(0,i.getProfileToView)({profile:t.profile,flattenRecursion:s}),index:a.indexToView})}function c(t){return(0,e.bindActionCreator)(r,t)}const p={setGLCanvas:c(o.actions.setGLCanvas),setLoading:c(o.actions.setLoading),setError:c(o.actions.setError),setProfileGroup:c(o.actions.setProfileGroup),setDragActive:c(o.actions.setDragActive),setViewMode:c(o.actions.setViewMode),setFlattenRecursion:c(o.actions.setFlattenRecursion),setProfileIndexToView:c(o.actions.setProfileIndexToView)};return Object.assign({activeProfileState:l,dispatch:r,canvasContext:t.glCanvas?(0,i.getCanvasContext)(t.glCanvas):null,resizeCanvas:(e,o,r,s)=>{if(t.glCanvas){const a=(0,i.getCanvasContext)(t.glCanvas).gl;a.resize(e,o,r,s),a.clear(new n.Graphics.Color(1,1,1,1))}}},p,t)}); +},{"../lib/typed-redux":36,"./application":34,"../store/getters":38,"../store/actions":40,"../gl/graphics":42}],13:[function(require,module,exports) { +"use strict";var 
e=require("preact"),o=require("./store"),t=require("preact-redux"),r=require("./views/application-container");console.log(`speedscope v${require("../package.json").version}`),module.hot&&(module.hot.dispose(()=>{(0,e.render)((0,e.h)("div",null),document.body,document.body.lastElementChild||void 0)}),module.hot.accept());const i=window.store,d=(0,o.createApplicationStore)(i?i.getState():{});window.store=d,(0,e.render)((0,e.h)(t.Provider,{store:d},(0,e.h)(r.ApplicationContainer,null)),document.body,document.body.lastElementChild||void 0); +},{"preact":24,"./store":29,"preact-redux":26,"./views/application-container":21,"../package.json":19}],215:[function(require,module,exports) { +module.exports=function(n){return new Promise(function(e,o){var r=document.createElement("script");r.async=!0,r.type="text/javascript",r.charset="utf-8",r.src=n,r.onerror=function(n){r.onerror=r.onload=null,o(n)},r.onload=function(){r.onerror=r.onload=null,e()},document.getElementsByTagName("head")[0].appendChild(r)})}; +},{}],0:[function(require,module,exports) { +var b=require(66);b.register("js",require(215)); +},{}]},{},[0,13], null) +//# sourceMappingURL=speedscope.9ee942dd.map \ No newline at end of file diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/css/main.388a904b.css.map b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/css/main.388a904b.css.map new file mode 100644 index 0000000000000000000000000000000000000000..704ad10ed26462dfc09a495f395727d58e9358e5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/css/main.388a904b.css.map @@ -0,0 +1 @@ +{"version":3,"file":"static/css/main.388a904b.css","mappings":"AACA,WAGE,iBAAkB,CAFlB,kBAAqB,CACrB,iBAAkB,CAElB,eAAgB,CAChB,oNAKF,CAGA,WAGE,iBAAkB,CAFlB,kBAAqB,CACrB,iBAAkB,CAElB,eAAgB,CAChB,4OAKF,CAGA,WAGE,iBAAkB,CAFlB,kBAAqB,CACrB,iBAAkB,CAElB,eAAgB,CAChB,sNAKF,CAGA,WAGE,iBAAkB,CAFlB,kBAAqB,CACrB,iBAAkB,CAElB,eAAgB,CAChB,8OAKF,CAGA,WAGE,iBAAkB,CAFlB,kBAAqB,CACrB,iBAAkB,CAElB,eAAgB,CAChB,0NAKF,CAGA,WAGE,iBAAkB,CAFlB,kBAAqB,CACrB,iBAAkB,CAElB,eAAgB,CAChB,kPAKF,CAGA,WAGE,iBAAkB,CAFlB,kBAAqB,CACrB,iBAAkB,CAElB,eAAgB,CAChB,wNAKF,CAGA,WAGE,iBAAkB,CAFlB,kBAAqB,CACrB,iBAAkB,CAElB,eAAgB,CAChB,gPAKF,CAGA,WAGE,iBAAkB,CAFlB,kBAAqB,CACrB,iBAAkB,CAElB,eAAgB,CAChB,oNAKF,CAGA,WAGE,iBAAkB,CAFlB,kBAAqB,CACrB,iBAAkB,CAElB,eAAgB,CAChB,4OAKF,CAGA,WAGE,iBAAkB,CAFlB,kBAAqB,CACrB,iBAAkB,CAElB,eAAgB,CAChB,sNAKF,CAGA,WAGE,iBAAkB,CAFlB,kBAAqB,CACrB,iBAAkB,CAElB,eAAgB,CAChB,8OAKF,CCjJA,WAIE,aAAc,CAHd,aAAc,CACd,eAAgB,CAChB,YAEF,CACA,uJAME,aACF,CACA,wCACE,aACF,CACA,2RAWE,aACF,CACA,gGAIE,aACF,CACA,4NASE,eACF,CACA,0BACE,iBACF,CCtDA,YAIE,UAAW,CAHX,aAAc,CACd,eAAgB,CAChB,YAEF,CAEA,kDAEE,UAAW,CACX,iBACF,CAEA,iFAGE,UAAW,CACX,eACF,CAEA,mJAKE,UACF,CAEA,kDAEE,UACF,CAEA,gFAGE,UAAW,CACX,eACF,CAEA,wBACE,eACF,CAEA,2DAEE,UAAW,CACX,eACF,CAEA,yEAGE,UAAc,CACd,eACF,CAEA,gDAEE,aACF,CAEA,kDAEE,aACF,CAEA,0DAEE,aACF,CAEA,uBACE,UAAW,CACX,eACF,CAEA,2BACE,eACF,CAEA,2BACE,eACF,CAEA,2BACE,iBACF,CAEA,yBACE,eACF,CC/FA,cACE,wBACF","sources":["../node_modules/typeface-roboto/index.css","components/LogView/darcula.css","components/LogView/github.css","components/LogView/index.css"],"sourcesContent":["/* roboto-100normal - latin */\n@font-face {\n font-family: 'Roboto';\n font-style: normal;\n font-display: swap;\n font-weight: 100;\n src:\n local('Roboto Thin '),\n local('Roboto-Thin'),\n url('./files/roboto-latin-100.woff2') format('woff2'), /* Super Modern Browsers */\n url('./files/roboto-latin-100.woff') format('woff'); /* 
Modern Browsers */\n}\n\n/* roboto-100italic - latin */\n@font-face {\n font-family: 'Roboto';\n font-style: italic;\n font-display: swap;\n font-weight: 100;\n src:\n local('Roboto Thin italic'),\n local('Roboto-Thinitalic'),\n url('./files/roboto-latin-100italic.woff2') format('woff2'), /* Super Modern Browsers */\n url('./files/roboto-latin-100italic.woff') format('woff'); /* Modern Browsers */\n}\n\n/* roboto-300normal - latin */\n@font-face {\n font-family: 'Roboto';\n font-style: normal;\n font-display: swap;\n font-weight: 300;\n src:\n local('Roboto Light '),\n local('Roboto-Light'),\n url('./files/roboto-latin-300.woff2') format('woff2'), /* Super Modern Browsers */\n url('./files/roboto-latin-300.woff') format('woff'); /* Modern Browsers */\n}\n\n/* roboto-300italic - latin */\n@font-face {\n font-family: 'Roboto';\n font-style: italic;\n font-display: swap;\n font-weight: 300;\n src:\n local('Roboto Light italic'),\n local('Roboto-Lightitalic'),\n url('./files/roboto-latin-300italic.woff2') format('woff2'), /* Super Modern Browsers */\n url('./files/roboto-latin-300italic.woff') format('woff'); /* Modern Browsers */\n}\n\n/* roboto-400normal - latin */\n@font-face {\n font-family: 'Roboto';\n font-style: normal;\n font-display: swap;\n font-weight: 400;\n src:\n local('Roboto Regular '),\n local('Roboto-Regular'),\n url('./files/roboto-latin-400.woff2') format('woff2'), /* Super Modern Browsers */\n url('./files/roboto-latin-400.woff') format('woff'); /* Modern Browsers */\n}\n\n/* roboto-400italic - latin */\n@font-face {\n font-family: 'Roboto';\n font-style: italic;\n font-display: swap;\n font-weight: 400;\n src:\n local('Roboto Regular italic'),\n local('Roboto-Regularitalic'),\n url('./files/roboto-latin-400italic.woff2') format('woff2'), /* Super Modern Browsers */\n url('./files/roboto-latin-400italic.woff') format('woff'); /* Modern Browsers */\n}\n\n/* roboto-500normal - latin */\n@font-face {\n font-family: 'Roboto';\n font-style: normal;\n font-display: swap;\n font-weight: 500;\n src:\n local('Roboto Medium '),\n local('Roboto-Medium'),\n url('./files/roboto-latin-500.woff2') format('woff2'), /* Super Modern Browsers */\n url('./files/roboto-latin-500.woff') format('woff'); /* Modern Browsers */\n}\n\n/* roboto-500italic - latin */\n@font-face {\n font-family: 'Roboto';\n font-style: italic;\n font-display: swap;\n font-weight: 500;\n src:\n local('Roboto Medium italic'),\n local('Roboto-Mediumitalic'),\n url('./files/roboto-latin-500italic.woff2') format('woff2'), /* Super Modern Browsers */\n url('./files/roboto-latin-500italic.woff') format('woff'); /* Modern Browsers */\n}\n\n/* roboto-700normal - latin */\n@font-face {\n font-family: 'Roboto';\n font-style: normal;\n font-display: swap;\n font-weight: 700;\n src:\n local('Roboto Bold '),\n local('Roboto-Bold'),\n url('./files/roboto-latin-700.woff2') format('woff2'), /* Super Modern Browsers */\n url('./files/roboto-latin-700.woff') format('woff'); /* Modern Browsers */\n}\n\n/* roboto-700italic - latin */\n@font-face {\n font-family: 'Roboto';\n font-style: italic;\n font-display: swap;\n font-weight: 700;\n src:\n local('Roboto Bold italic'),\n local('Roboto-Bolditalic'),\n url('./files/roboto-latin-700italic.woff2') format('woff2'), /* Super Modern Browsers */\n url('./files/roboto-latin-700italic.woff') format('woff'); /* Modern Browsers */\n}\n\n/* roboto-900normal - latin */\n@font-face {\n font-family: 'Roboto';\n font-style: normal;\n font-display: swap;\n font-weight: 900;\n src:\n local('Roboto Black 
'),\n local('Roboto-Black'),\n url('./files/roboto-latin-900.woff2') format('woff2'), /* Super Modern Browsers */\n url('./files/roboto-latin-900.woff') format('woff'); /* Modern Browsers */\n}\n\n/* roboto-900italic - latin */\n@font-face {\n font-family: 'Roboto';\n font-style: italic;\n font-display: swap;\n font-weight: 900;\n src:\n local('Roboto Black italic'),\n local('Roboto-Blackitalic'),\n url('./files/roboto-latin-900italic.woff2') format('woff2'), /* Super Modern Browsers */\n url('./files/roboto-latin-900italic.woff') format('woff'); /* Modern Browsers */\n}\n\n","/*\nDracula Theme v1.2.0\nhttps://github.com/zenorocha/dracula-theme\nCopyright 2015, All rights reserved\nCode licensed under the MIT license\nhttp://zenorocha.mit-license.org\n@author Éverton Ribeiro \n@author Zeno Rocha \n*/\n.hljs-dark {\n display: block;\n overflow-x: auto;\n padding: 0.5em;\n color: #f8f8f2;\n}\n.hljs-dark .hljs-number,\n.hljs-dark .hljs-keyword,\n.hljs-dark .hljs-selector-tag,\n.hljs-dark .hljs-literal,\n.hljs-dark .hljs-section,\n.hljs-dark .hljs-link {\n color: #8be9fd;\n}\n.hljs-dark .hljs-function .hljs-keyword {\n color: #ff79c6;\n}\n.hljs-dark .hljs-string,\n.hljs-dark .hljs-title,\n.hljs-dark .hljs-name,\n.hljs-dark .hljs-type,\n.hljs-dark .hljs-attribute,\n.hljs-dark .hljs-symbol,\n.hljs-dark .hljs-bullet,\n.hljs-dark .hljs-addition,\n.hljs-dark .hljs-variable,\n.hljs-dark .hljs-template-tag,\n.hljs-dark .hljs-template-variable {\n color: #f1fa8c;\n}\n.hljs-dark .hljs-comment,\n.hljs-dark .hljs-quote,\n.hljs-dark .hljs-deletion,\n.hljs-dark .hljs-meta {\n color: #6272a4;\n}\n.hljs-dark .hljs-keyword,\n.hljs-dark .hljs-selector-tag,\n.hljs-dark .hljs-literal,\n.hljs-dark .hljs-title,\n.hljs-dark .hljs-section,\n.hljs-dark .hljs-doctag,\n.hljs-dark .hljs-type,\n.hljs-dark .hljs-name,\n.hljs-dark .hljs-strong {\n font-weight: bold;\n}\n.hljs-dark .hljs-emphasis {\n font-style: italic;\n}\n","/*\ngithub.com style (c) Vasily Polovnyov \n*/\n\n.hljs-light {\n display: block;\n overflow-x: auto;\n padding: 0.5em;\n color: #333;\n}\n\n.hljs-light .hljs-comment,\n.hljs-light .hljs-quote {\n color: #998;\n font-style: italic;\n}\n\n.hljs-light .hljs-keyword,\n.hljs-light .hljs-selector-tag,\n.hljs-light .hljs-subst {\n color: #333;\n font-weight: bold;\n}\n\n.hljs-light .hljs-number,\n.hljs-light .hljs-literal,\n.hljs-light .hljs-variable,\n.hljs-light .hljs-template-variable,\n.hljs-light .hljs-tag .hljs-attr {\n color: #008080;\n}\n\n.hljs-light .hljs-string,\n.hljs-light .hljs-doctag {\n color: #d14;\n}\n\n.hljs-light .hljs-title,\n.hljs-light .hljs-section,\n.hljs-light .hljs-selector-id {\n color: #900;\n font-weight: bold;\n}\n\n.hljs-light .hljs-subst {\n font-weight: normal;\n}\n\n.hljs-light .hljs-type,\n.hljs-light .hljs-class .hljs-title {\n color: #458;\n font-weight: bold;\n}\n\n.hljs-light .hljs-tag,\n.hljs-light .hljs-name,\n.hljs-light .hljs-attribute {\n color: #000080;\n font-weight: normal;\n}\n\n.hljs-light .hljs-regexp,\n.hljs-light .hljs-link {\n color: #009926;\n}\n\n.hljs-light .hljs-symbol,\n.hljs-light .hljs-bullet {\n color: #990073;\n}\n\n.hljs-light .hljs-built_in,\n.hljs-light .hljs-builtin-name {\n color: #0086b3;\n}\n\n.hljs-light .hljs-meta {\n color: #999;\n font-weight: bold;\n}\n\n.hljs-light .hljs-deletion {\n background: #fdd;\n}\n\n.hljs-light .hljs-addition {\n background: #dfd;\n}\n\n.hljs-light .hljs-emphasis {\n font-style: italic;\n}\n\n.hljs-light .hljs-strong {\n font-weight: bold;\n}\n","span.find-kws {\n background-color: 
#ffd800;\n}\n"],"names":[],"sourceRoot":""} \ No newline at end of file diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/js/495.01ff0983.chunk.js b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/js/495.01ff0983.chunk.js new file mode 100644 index 0000000000000000000000000000000000000000..dfd18248aea135f02d94fe817908e74635bf2a6b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/js/495.01ff0983.chunk.js @@ -0,0 +1,2 @@ +"use strict";(self.webpackChunkray_dashboard_client=self.webpackChunkray_dashboard_client||[]).push([[495],{6495:(a,d,s)=>{s.r(d),s.d(d,{default:()=>h});var e=s(697),r=(s(2791),s(9903)),c=s(184);const h=()=>(0,c.jsx)(e.Z,{sx:{padding:2,width:"100%",backgroundColor:"white"},children:(0,c.jsx)(r.Z,{})})}}]); +//# sourceMappingURL=495.01ff0983.chunk.js.map \ No newline at end of file diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/js/495.01ff0983.chunk.js.map b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/js/495.01ff0983.chunk.js.map new file mode 100644 index 0000000000000000000000000000000000000000..c87650c8e4858e03b28b9e2fa8bed771251eec11 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/js/495.01ff0983.chunk.js.map @@ -0,0 +1 @@ +{"version":3,"file":"static/js/495.01ff0983.chunk.js","mappings":"mMAOA,MAcA,EAde,KAEX,SAAC,IAAG,CACFA,GAAI,CACFC,QAAS,EACTC,MAAO,OACPC,gBAAiB,SACjB,UAEF,SAAC,IAAS,K","sources":["pages/actor/index.tsx"],"sourcesContent":["import { Box } from \"@mui/material\";\nimport React from \"react\";\nimport ActorList from \"./ActorList\";\n\n/**\n * Represent the standalone actors page.\n */\nconst Actors = () => {\n return (\n \n \n \n );\n};\n\nexport default Actors;\n"],"names":["sx","padding","width","backgroundColor"],"sourceRoot":""} \ No newline at end of file diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/js/591.222f4f03.chunk.js b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/js/591.222f4f03.chunk.js new file mode 100644 index 0000000000000000000000000000000000000000..9de2990460668cbd35f910e72243d2d790452319 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/js/591.222f4f03.chunk.js @@ -0,0 +1,2 @@ +"use strict";(self.webpackChunkray_dashboard_client=self.webpackChunkray_dashboard_client||[]).push([[591],{2591:(a,t,c)=>{c.r(t),c.d(t,{default:()=>j});var e=c(697),s=c(1889),i=c(8406),n=c(4387),d=c(4518),l=c(2791),r=c(7689),p=c(1035),u=c(359),o=c(4569),g=c.n(o);var h=c(184);const j=()=>{const{cmd:a,ip:t,pid:c}=(0,r.UO)(),[o,j]=(0,l.useState)(),[m,x]=(0,l.useState)("gcutil"),k=(0,l.useCallback)((()=>((a,t,c)=>g().get("utils/jstat",{params:{ip:a,pid:t,options:c}}))(t,c,m).then((a=>{a.data.result?j(a.data.data.output):j(a.data.msg)})).catch((a=>j(a.toString())))),[t,c,m]);return(0,l.useEffect)((()=>{switch(a){case"jstack":((a,t)=>g().get("utils/jstack",{params:{ip:a,pid:t}}))(t,c).then((a=>{a.data.result?j(a.data.data.output):j(a.data.msg)})).catch((a=>j(a.toString())));break;case"jmap":((a,t)=>g().get("utils/jmap",{params:{ip:a,pid:t}}))(t,c).then((a=>{a.data.result?j(a.data.data.output):j(a.data.msg)})).catch((a=>j(a.toString())));break;case"jstat":k();break;default:j("Command ".concat(a," is not 
supported."))}}),[a,k,t,c]),(0,h.jsxs)(e.Z,{sx:{padding:4,width:"100%"},children:[(0,h.jsx)(u.Z,{title:a,children:"jstat"===a&&(0,h.jsx)(e.Z,{sx:{padding:2,marginTop:2},children:(0,h.jsxs)(s.ZP,{container:!0,spacing:1,children:[(0,h.jsx)(s.ZP,{item:!0,children:(0,h.jsx)(i.Z,{value:m,onChange:a=>x(a.target.value),children:["class","compiler","gc","gccapacity","gcmetacapacity","gcnew","gcnewcapacity","gcold","gcoldcapacity","gcutil","gccause","printcompilation"].map((a=>(0,h.jsx)(n.Z,{value:a,children:a})))})}),(0,h.jsx)(s.ZP,{item:!0,children:(0,h.jsx)(d.Z,{onClick:k,children:"Execute"})})]})})}),(0,h.jsx)(u.Z,{title:"IP: ".concat(t," / Pid: ").concat(c),children:(0,h.jsx)(p.Z,{content:o||"loading",language:"prolog",height:800})})]})}}}]); +//# sourceMappingURL=591.222f4f03.chunk.js.map \ No newline at end of file diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/js/591.222f4f03.chunk.js.map b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/js/591.222f4f03.chunk.js.map new file mode 100644 index 0000000000000000000000000000000000000000..a31d0a214339d5e7565cb6f690e57caebf405730 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/js/591.222f4f03.chunk.js.map @@ -0,0 +1 @@ +{"version":3,"file":"static/js/591.222f4f03.chunk.js","mappings":"qRAOA,MAsGA,EAtGkB,KAChB,MAAM,IAAEA,EAAG,GAAEC,EAAE,IAAEC,IAAQC,EAAAA,EAAAA,OAKlBC,EAAQC,IAAaC,EAAAA,EAAAA,aACrBC,EAAQC,IAAaF,EAAAA,EAAAA,UAAS,UAC/BG,GAAeC,EAAAA,EAAAA,cACnB,ICYoB,EAACT,EAAYC,EAAaS,IACzCC,IAAAA,IAAkB,cAAe,CACtCC,OAAQ,CACNZ,KACAC,MACAS,aDhBAG,CAASb,EAAIC,EAAKK,GACfQ,MAAMC,IACDA,EAAIC,KAAKb,OACXC,EAAUW,EAAIC,KAAKA,KAAKC,QAExBb,EAAUW,EAAIC,KAAKE,IACrB,IAEDC,OAAOC,GAAQhB,EAAUgB,EAAIC,eAClC,CAACrB,EAAIC,EAAKK,IAoCZ,OAjCAgB,EAAAA,EAAAA,YAAU,KACR,OAAQvB,GACN,IAAK,SCrBc,EAACC,EAAYC,IAC7BU,IAAAA,IAAkB,eAAgB,CACvCC,OAAQ,CACNZ,KACAC,SDkBEsB,CAAUvB,EAAIC,GACXa,MAAMC,IACDA,EAAIC,KAAKb,OACXC,EAAUW,EAAIC,KAAKA,KAAKC,QAExBb,EAAUW,EAAIC,KAAKE,IACrB,IAEDC,OAAOC,GAAQhB,EAAUgB,EAAIC,cAChC,MACF,IAAK,OCvBY,EAACrB,EAAYC,IAC3BU,IAAAA,IAAkB,aAAc,CACrCC,OAAQ,CACNZ,KACAC,SDoBEuB,CAAQxB,EAAIC,GACTa,MAAMC,IACDA,EAAIC,KAAKb,OACXC,EAAUW,EAAIC,KAAKA,KAAKC,QAExBb,EAAUW,EAAIC,KAAKE,IACrB,IAEDC,OAAOC,GAAQhB,EAAUgB,EAAIC,cAChC,MACF,IAAK,QACHb,IACA,MACF,QACEJ,EAAU,WAAD,OAAYL,EAAG,uBAClB,GAET,CAACA,EAAKS,EAAcR,EAAIC,KAGzB,UAACwB,EAAA,EAAG,CAACC,GAAI,CAAEC,QAAS,EAAGC,MAAO,QAAS,WACrC,SAACC,EAAA,EAAS,CAACC,MAAO/B,EAAI,SACX,UAARA,IACC,SAAC0B,EAAA,EAAG,CAACC,GAAI,CAAEC,QAAS,EAAGI,UAAW,GAAI,UACpC,UAACC,EAAA,GAAI,CAACC,WAAS,EAACC,QAAS,EAAE,WACzB,SAACF,EAAA,GAAI,CAACG,MAAI,YACR,SAACC,EAAA,EAAM,CACLC,MAAO/B,EACPgC,SAAWC,GAAMhC,EAAUgC,EAAEC,OAAOH,OAAiB,SAEpD,CACC,QACA,WACA,KACA,aACA,iBACA,QACA,gBACA,QACA,gBACA,SACA,UACA,oBACAI,KAAKF,IACL,SAACG,EAAA,EAAQ,CAACL,MAAOE,EAAE,SAAEA,WAI3B,SAACP,EAAA,GAAI,CAACG,MAAI,YACR,SAACQ,EAAA,EAAM,CAACC,QAASpC,EAAa,+BAMxC,SAACqB,EAAA,EAAS,CAACC,MAAK,cAAS9B,EAAE,mBAAWC,GAAM,UAC1C,SAAC4C,EAAA,EAAc,CACbC,QAAS3C,GAAU,UACnB4C,SAAS,SACTC,OAAQ,UAGR,C","sources":["pages/cmd/CMDResult.tsx","service/util.ts"],"sourcesContent":["import { Box, Button, Grid, MenuItem, Select } from \"@mui/material\";\nimport React, { useCallback, useEffect, useState } from \"react\";\nimport { useParams } from \"react-router-dom\";\nimport LogVirtualView from \"../../components/LogView/LogVirtualView\";\nimport TitleCard from \"../../components/TitleCard\";\nimport { getJmap, getJstack, getJstat } from \"../../service/util\";\n\nconst CMDResult = () => {\n const { cmd, ip, pid } = useParams() as {\n 
cmd: string;\n ip: string;\n pid: string;\n };\n const [result, setResult] = useState();\n const [option, setOption] = useState(\"gcutil\");\n const executeJstat = useCallback(\n () =>\n getJstat(ip, pid, option)\n .then((rsp) => {\n if (rsp.data.result) {\n setResult(rsp.data.data.output);\n } else {\n setResult(rsp.data.msg);\n }\n })\n .catch((err) => setResult(err.toString())),\n [ip, pid, option],\n );\n\n useEffect(() => {\n switch (cmd) {\n case \"jstack\":\n getJstack(ip, pid)\n .then((rsp) => {\n if (rsp.data.result) {\n setResult(rsp.data.data.output);\n } else {\n setResult(rsp.data.msg);\n }\n })\n .catch((err) => setResult(err.toString()));\n break;\n case \"jmap\":\n getJmap(ip, pid)\n .then((rsp) => {\n if (rsp.data.result) {\n setResult(rsp.data.data.output);\n } else {\n setResult(rsp.data.msg);\n }\n })\n .catch((err) => setResult(err.toString()));\n break;\n case \"jstat\":\n executeJstat();\n break;\n default:\n setResult(`Command ${cmd} is not supported.`);\n break;\n }\n }, [cmd, executeJstat, ip, pid]);\n\n return (\n \n \n {cmd === \"jstat\" && (\n \n \n \n setOption(e.target.value as string)}\n >\n {[\n \"class\",\n \"compiler\",\n \"gc\",\n \"gccapacity\",\n \"gcmetacapacity\",\n \"gcnew\",\n \"gcnewcapacity\",\n \"gcold\",\n \"gcoldcapacity\",\n \"gcutil\",\n \"gccause\",\n \"printcompilation\",\n ].map((e) => (\n {e}\n ))}\n \n \n \n \n \n \n \n )}\n \n \n \n \n \n );\n};\n\nexport default CMDResult;\n","import axios from \"axios\";\n\ntype CMDRsp = {\n result: boolean;\n msg: string;\n data: {\n output: string;\n };\n};\n\nexport const getJstack = (ip: string, pid: string) => {\n return axios.get(\"utils/jstack\", {\n params: {\n ip,\n pid,\n },\n });\n};\n\nexport const getJmap = (ip: string, pid: string) => {\n return axios.get(\"utils/jmap\", {\n params: {\n ip,\n pid,\n },\n });\n};\n\nexport const getJstat = (ip: string, pid: string, options: string) => {\n return axios.get(\"utils/jstat\", {\n params: {\n ip,\n pid,\n options,\n },\n });\n};\n\ntype NamespacesRsp = {\n result: boolean;\n msg: string;\n data: {\n namespaces: {\n namespaceId: string;\n hostNameList: string[];\n }[];\n };\n};\n\nexport const getNamespaces = () => {\n return axios.get(\"namespaces\");\n};\n"],"names":["cmd","ip","pid","useParams","result","setResult","useState","option","setOption","executeJstat","useCallback","options","axios","params","getJstat","then","rsp","data","output","msg","catch","err","toString","useEffect","getJstack","getJmap","Box","sx","padding","width","TitleCard","title","marginTop","Grid","container","spacing","item","Select","value","onChange","e","target","map","MenuItem","Button","onClick","LogVirtualView","content","language","height"],"sourceRoot":""} \ No newline at end of file diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/js/main.378317da.js.LICENSE.txt b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/js/main.378317da.js.LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..979bc10ea9684d0b853c16f2b14f62d0e86d18d1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/js/main.378317da.js.LICENSE.txt @@ -0,0 +1,112 @@ +/*! 
js-yaml 4.1.0 https://github.com/nodeca/js-yaml @license MIT */ + +/** + * @license + * Lodash + * Copyright OpenJS Foundation and other contributors + * Released under MIT license + * Based on Underscore.js 1.8.3 + * Copyright Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors + */ + +/** + * @license React + * react-dom.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +/** + * @license React + * react-is.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +/** + * @license React + * react-jsx-runtime.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +/** + * @license React + * react.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +/** + * @license React + * scheduler.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +/** + * @license React + * use-sync-external-store-shim.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +/** + * @remix-run/router v1.0.3 + * + * Copyright (c) Remix Software Inc. + * + * This source code is licensed under the MIT license found in the + * LICENSE.md file in the root directory of this source tree. + * + * @license MIT + */ + +/** + * React Router DOM v6.4.3 + * + * Copyright (c) Remix Software Inc. + * + * This source code is licensed under the MIT license found in the + * LICENSE.md file in the root directory of this source tree. + * + * @license MIT + */ + +/** + * React Router v6.4.3 + * + * Copyright (c) Remix Software Inc. + * + * This source code is licensed under the MIT license found in the + * LICENSE.md file in the root directory of this source tree. + * + * @license MIT + */ + +/** @license React v16.13.1 + * react-is.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. 
+ */ diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/logo.3704c1bbca650bb72a64b5d4c3fa5ced.svg b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/logo.3704c1bbca650bb72a64b5d4c3fa5ced.svg new file mode 100644 index 0000000000000000000000000000000000000000..70be9ee548c6a425e5e2b1621f0ee8f980a2ac8e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/logo.3704c1bbca650bb72a64b5d4c3fa5ced.svg @@ -0,0 +1,34 @@ + + + + +Ray Logo + + + + + + + + + + diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-100.a45108d3b34af91f9113.woff b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-100.a45108d3b34af91f9113.woff new file mode 100644 index 0000000000000000000000000000000000000000..7306a7b7141ad287fe93b046009c053591184626 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-100.a45108d3b34af91f9113.woff differ diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-100italic.451d4e559d6f57cdf6a1.woff b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-100italic.451d4e559d6f57cdf6a1.woff new file mode 100644 index 0000000000000000000000000000000000000000..4e0c9295f249faded9e9ebeea494cb20272cd21f Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-100italic.451d4e559d6f57cdf6a1.woff differ diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-300.865f928cbabcc9f8f2b5.woff b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-300.865f928cbabcc9f8f2b5.woff new file mode 100644 index 0000000000000000000000000000000000000000..2f6bdb5e741df8575a0b43b80fc97e32a7e0463b Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-300.865f928cbabcc9f8f2b5.woff differ diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-400.176f8f5bd5f02b3abfcf.woff2 b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-400.176f8f5bd5f02b3abfcf.woff2 new file mode 100644 index 0000000000000000000000000000000000000000..1a537015109f44983c26738a436c8f5edc44b9ae Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-400.176f8f5bd5f02b3abfcf.woff2 differ diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-400.49ae34d4cc6b98c00c69.woff b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-400.49ae34d4cc6b98c00c69.woff new file mode 100644 index 0000000000000000000000000000000000000000..69c88254051499539452130e33c36a20e9469e8e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-400.49ae34d4cc6b98c00c69.woff differ diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-400italic.b1d9d9904bfca8802a63.woff b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-400italic.b1d9d9904bfca8802a63.woff new file mode 100644 index 0000000000000000000000000000000000000000..b940dbcb78e3c81d91c29275453d4223dc72a116 Binary files /dev/null and 
b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-400italic.b1d9d9904bfca8802a63.woff differ diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-400italic.d022bc70dc1bf7b3425d.woff2 b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-400italic.d022bc70dc1bf7b3425d.woff2 new file mode 100644 index 0000000000000000000000000000000000000000..2741d4f0829e357ce245b819c46eaf7279cf3ff2 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-400italic.d022bc70dc1bf7b3425d.woff2 differ diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-500.cea99d3e3e13a3a599a0.woff b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-500.cea99d3e3e13a3a599a0.woff new file mode 100644 index 0000000000000000000000000000000000000000..869925869a7b9d5e5080cc3e4f5291611b08dcfa Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-500.cea99d3e3e13a3a599a0.woff differ diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-500italic.18d00f739ff1e1c52db1.woff b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-500italic.18d00f739ff1e1c52db1.woff new file mode 100644 index 0000000000000000000000000000000000000000..b794d20deda4dcf984deffa3be83c93fca8cb73f Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-500italic.18d00f739ff1e1c52db1.woff differ diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-700.2267169ee7270a22a963.woff b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-700.2267169ee7270a22a963.woff new file mode 100644 index 0000000000000000000000000000000000000000..0f14effba09e91884ebca24e1c6e565ef8d0977e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-700.2267169ee7270a22a963.woff differ diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-700italic.9360531f9bb817f917f0.woff b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-700italic.9360531f9bb817f917f0.woff new file mode 100644 index 0000000000000000000000000000000000000000..85ec25839c965dc521de565c4e9f8241eaab2162 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-700italic.9360531f9bb817f917f0.woff differ diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-900.870c8c1486f76054301a.woff2 b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-900.870c8c1486f76054301a.woff2 new file mode 100644 index 0000000000000000000000000000000000000000..802499d3fd9da031f14759635a3c7a489e84d072 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-900.870c8c1486f76054301a.woff2 differ diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-900.bac8362e7a6ea60b6983.woff b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-900.bac8362e7a6ea60b6983.woff new file mode 100644 
index 0000000000000000000000000000000000000000..4d50531e3e8970970a867c4b459c044bad3a7925 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-900.bac8362e7a6ea60b6983.woff differ diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-900italic.c20d916c1a1b094c1cec.woff b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-900italic.c20d916c1a1b094c1cec.woff new file mode 100644 index 0000000000000000000000000000000000000000..a3dd7c2d6d373c41e68cb685df4a13e6b5cdee4b Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-900italic.c20d916c1a1b094c1cec.woff differ diff --git a/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-900italic.cb5ad999740e9d8a8bd1.woff2 b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-900italic.cb5ad999740e9d8a8bd1.woff2 new file mode 100644 index 0000000000000000000000000000000000000000..7d846b1a4c5d005c8171512bb667de92c44afa7b Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/dashboard/client/build/static/media/roboto-latin-900italic.cb5ad999740e9d8a8bd1.woff2 differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__init__.py b/.venv/lib/python3.11/site-packages/ray/serve/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fb33eaeb04fb8f2ec55f1fe527e33d5483e15237 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/__init__.py @@ -0,0 +1,57 @@ +import ray._private.worker + +try: + from ray.serve._private.logging_utils import configure_default_serve_logger + from ray.serve.api import ( + Application, + Deployment, + _run, + delete, + deployment, + get_app_handle, + get_deployment_handle, + get_multiplexed_model_id, + get_replica_context, + ingress, + multiplexed, + run, + shutdown, + start, + status, + ) + from ray.serve.batching import batch + from ray.serve.config import HTTPOptions + +except ModuleNotFoundError as e: + e.msg += ( + '. You can run `pip install "ray[serve]"` to install all Ray Serve' + " dependencies." + ) + raise e + +# Setup default ray.serve logger to ensure all serve module logs are captured. +configure_default_serve_logger() + +# Mute the warning because Serve sometimes intentionally calls +# ray.get inside async actors. 
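The ray/serve/__init__.py in this diff wraps its imports in a guard that appends an install hint to the error before re-raising, so a missing optional dependency fails with an actionable message instead of a bare import error. A minimal standalone sketch of the same pattern; the package and extra names here are made up for illustration:

    try:
        import fancy_optional_dep  # hypothetical optional dependency
    except ModuleNotFoundError as e:
        # ImportError (ModuleNotFoundError's base class) exposes a mutable
        # .msg attribute, so the hint becomes part of the re-raised error.
        e.msg += '. You can run `pip install "mypkg[extras]"` to install it.'
        raise e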
+ray._private.worker.blocking_get_inside_async_warned = True + +__all__ = [ + "_run", + "batch", + "start", + "HTTPOptions", + "get_replica_context", + "shutdown", + "ingress", + "deployment", + "run", + "delete", + "Application", + "Deployment", + "multiplexed", + "get_multiplexed_model_id", + "status", + "get_app_handle", + "get_deployment_handle", +] diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14bdbfa55457960854da68ebcc6f56d6c938f895 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/api.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/api.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..765c8ab79425ce62d138ee45d95f21614b526d34 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/api.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/autoscaling_policy.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/autoscaling_policy.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a4eefd7ba8fd1f509b7353fab406385758b97c1 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/autoscaling_policy.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/batching.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/batching.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9a7b8f44eb5453986f6836f424335ae7668eaab Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/batching.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/config.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/config.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f722776d2a344620dae3181f1e4253679109709a Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/config.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/context.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/context.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b008eb5558fca2df64f66f040c7225a420532c12 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/context.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/dag.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/dag.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3953ce5bfa62952edbcb5ab7358b5358692d7ce Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/dag.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/deployment.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/deployment.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1fa1b97b100e2ff7ae933f9351635e2d5a8e15e Binary files /dev/null and 
b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/deployment.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/exceptions.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/exceptions.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73a7d386ef4f5c210a2282d192aed4fe75eababa Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/exceptions.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/gradio_integrations.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/gradio_integrations.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8cd3d2b798322030c79dec046244a69362b78cc Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/gradio_integrations.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/grpc_util.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/grpc_util.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0481a17af56a95dbbadfa85038cf08cbccc26b9 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/grpc_util.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/handle.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/handle.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..831940323c287ed6fed339ec87ff19dbc4884120 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/handle.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/metrics.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/metrics.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7ea33ef0819a25183c00d5205773a6a929b4f94 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/metrics.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/multiplex.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/multiplex.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0df32fc4cbce5ae02eb89319ed829ad57d5aac74 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/multiplex.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/schema.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/schema.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84bb70c538e41e8e57dc4ceb3c4fae6c36c9c650 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/schema.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/scripts.cpython-311.pyc b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/scripts.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c52c95c94a32f0631ad7a52e786427c69e244c14 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/ray/serve/__pycache__/scripts.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/ray/serve/_private/deployment_info.py b/.venv/lib/python3.11/site-packages/ray/serve/_private/deployment_info.py new file mode 
100644 index 0000000000000000000000000000000000000000..28d16a0f9a67c7f0074a1b64e077049d4ec033af --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/_private/deployment_info.py @@ -0,0 +1,172 @@ +from typing import Any, Dict, Optional + +import ray +from ray.serve._private.common import TargetCapacityDirection +from ray.serve._private.config import DeploymentConfig, ReplicaConfig +from ray.serve.generated.serve_pb2 import DeploymentInfo as DeploymentInfoProto +from ray.serve.generated.serve_pb2 import ( + TargetCapacityDirection as TargetCapacityDirectionProto, +) + + +class DeploymentInfo: + def __init__( + self, + deployment_config: DeploymentConfig, + replica_config: ReplicaConfig, + start_time_ms: int, + deployer_job_id: str, + actor_name: Optional[str] = None, + version: Optional[str] = None, + end_time_ms: Optional[int] = None, + route_prefix: str = None, + docs_path: str = None, + ingress: bool = False, + target_capacity: Optional[float] = None, + target_capacity_direction: Optional[TargetCapacityDirection] = None, + ): + self.deployment_config = deployment_config + self.replica_config = replica_config + # The time when .deploy() was first called for this deployment. + self.start_time_ms = start_time_ms + self.actor_name = actor_name + self.version = version + self.deployer_job_id = deployer_job_id + # The time when this deployment was deleted. + self.end_time_ms = end_time_ms + + # ephemeral state + self._cached_actor_def = None + + self.route_prefix = route_prefix + self.docs_path = docs_path + self.ingress = ingress + + self.target_capacity = target_capacity + self.target_capacity_direction = target_capacity_direction + + def __getstate__(self) -> Dict[Any, Any]: + clean_dict = self.__dict__.copy() + del clean_dict["_cached_actor_def"] + return clean_dict + + def __setstate__(self, d: Dict[Any, Any]) -> None: + self.__dict__ = d + self._cached_actor_def = None + + def update( + self, + deployment_config: DeploymentConfig = None, + replica_config: ReplicaConfig = None, + version: str = None, + route_prefix: str = None, + ) -> "DeploymentInfo": + return DeploymentInfo( + deployment_config=deployment_config or self.deployment_config, + replica_config=replica_config or self.replica_config, + start_time_ms=self.start_time_ms, + deployer_job_id=self.deployer_job_id, + actor_name=self.actor_name, + version=version or self.version, + end_time_ms=self.end_time_ms, + route_prefix=route_prefix or self.route_prefix, + docs_path=self.docs_path, + ingress=self.ingress, + target_capacity=self.target_capacity, + target_capacity_direction=self.target_capacity_direction, + ) + + def set_target_capacity( + self, + new_target_capacity: Optional[float], + new_target_capacity_direction: Optional[TargetCapacityDirection], + ): + self.target_capacity = new_target_capacity + self.target_capacity_direction = new_target_capacity_direction + + def config_changed(self, other) -> bool: + return ( + self.deployment_config != other.deployment_config + or self.replica_config.ray_actor_options + != other.replica_config.ray_actor_options + or other.version is None + or self.version != other.version + ) + + @property + def actor_def(self): + if self._cached_actor_def is None: + assert self.actor_name is not None + + # Break circular import :(. + from ray.serve._private.replica import ReplicaActor + + # Dynamically create a new class with custom name here so Ray picks it up + # correctly in actor metadata table and observability stack.
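DeploymentInfo treats the cached actor class as ephemeral state: __getstate__ drops it so instances stay picklable, __setstate__ restores the empty slot, and the actor_def property rebuilds it lazily with the three-argument type() call that follows, so the class carries a per-deployment name. A self-contained sketch of those two ideas; the class names here are illustrative, not Ray APIs:

    import pickle

    class ReplicaBase:
        def handle(self, request):
            return f"handled {request}"

    class Info:
        def __init__(self, actor_name):
            self.actor_name = actor_name
            self._cached_cls = None  # ephemeral; rebuilt after unpickling

        def __getstate__(self):
            state = self.__dict__.copy()
            del state["_cached_cls"]  # never ship the cached class
            return state

        def __setstate__(self, d):
            self.__dict__ = d
            self._cached_cls = None

        @property
        def cls(self):
            if self._cached_cls is None:
                # type(name, bases, namespace) builds a class at runtime with
                # a custom name, mirroring the type() call that follows in
                # the vendored code.
                self._cached_cls = type(
                    self.actor_name, (ReplicaBase,), dict(ReplicaBase.__dict__)
                )
            return self._cached_cls

    info = pickle.loads(pickle.dumps(Info("MyDeployment")))
    assert info.cls.__name__ == "MyDeployment"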
+ self._cached_actor_def = ray.remote( + type( + self.actor_name, + (ReplicaActor,), + dict(ReplicaActor.__dict__), + ) + ) + + return self._cached_actor_def + + @classmethod + def from_proto(cls, proto: DeploymentInfoProto): + deployment_config = ( + DeploymentConfig.from_proto(proto.deployment_config) + if proto.deployment_config + else None + ) + + target_capacity = proto.target_capacity if proto.target_capacity != -1 else None + + target_capacity_direction = TargetCapacityDirectionProto.Name( + proto.target_capacity_direction + ) + if target_capacity_direction == "UNSET": + target_capacity_direction = None + else: + target_capacity_direction = TargetCapacityDirection( + target_capacity_direction + ) + + data = { + "deployment_config": deployment_config, + "replica_config": ReplicaConfig.from_proto( + proto.replica_config, + deployment_config.needs_pickle() if deployment_config else True, + ), + "start_time_ms": proto.start_time_ms, + "actor_name": proto.actor_name if proto.actor_name != "" else None, + "version": proto.version if proto.version != "" else None, + "end_time_ms": proto.end_time_ms if proto.end_time_ms != 0 else None, + "deployer_job_id": ray.get_runtime_context().get_job_id(), + "target_capacity": target_capacity, + "target_capacity_direction": target_capacity_direction, + } + + return cls(**data) + + def to_proto(self): + data = { + "start_time_ms": self.start_time_ms, + "actor_name": self.actor_name, + "version": self.version, + "end_time_ms": self.end_time_ms, + } + if self.deployment_config: + data["deployment_config"] = self.deployment_config.to_proto() + if self.replica_config: + data["replica_config"] = self.replica_config.to_proto() + if self.target_capacity is None: + data["target_capacity"] = -1 + else: + data["target_capacity"] = self.target_capacity + if self.target_capacity_direction is None: + data["target_capacity_direction"] = TargetCapacityDirectionProto.UNSET + else: + data["target_capacity_direction"] = self.target_capacity_direction.name + return DeploymentInfoProto(**data) diff --git a/.venv/lib/python3.11/site-packages/ray/serve/_private/grpc_util.py b/.venv/lib/python3.11/site-packages/ray/serve/_private/grpc_util.py new file mode 100644 index 0000000000000000000000000000000000000000..97d77235d7d54f20b283942627310f06f5be9346 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/_private/grpc_util.py @@ -0,0 +1,76 @@ +from typing import Callable, List, Optional, Sequence, Tuple + +import grpc +from grpc.aio._server import Server + +from ray.serve._private.constants import DEFAULT_GRPC_SERVER_OPTIONS + + +class gRPCGenericServer(Server): + """Custom gRPC server that will override all service method handlers. + + Original implementation see: https://github.com/grpc/grpc/blob/ + 60c1701f87cacf359aa1ad785728549eeef1a4b0/src/python/grpcio/grpc/aio/_server.py + """ + + def __init__( + self, + service_handler_factory: Callable, + *, + extra_options: Optional[List[Tuple[str, str]]] = None + ): + super().__init__( + thread_pool=None, + generic_handlers=(), + interceptors=(), + maximum_concurrent_rpcs=None, + compression=None, + options=DEFAULT_GRPC_SERVER_OPTIONS + (extra_options or []), + ) + self.generic_rpc_handlers = [] + self.service_handler_factory = service_handler_factory + + def add_generic_rpc_handlers( + self, generic_rpc_handlers: Sequence[grpc.GenericRpcHandler] + ): + """Override generic_rpc_handlers before adding to the gRPC server. + + This function will override all user defined handlers to have + 1. 
None `response_serializer` so the server can pass back the + raw protobuf bytes to the user. + 2. `unary_unary` is always calling the unary function generated via + `self.service_handler_factory` + 3. `unary_stream` is always calling the streaming function generated via + `self.service_handler_factory` + """ + serve_rpc_handlers = {} + rpc_handler = generic_rpc_handlers[0] + for service_method, method_handler in rpc_handler._method_handlers.items(): + serve_method_handler = method_handler._replace( + response_serializer=None, + unary_unary=self.service_handler_factory( + service_method=service_method, + stream=False, + ), + unary_stream=self.service_handler_factory( + service_method=service_method, + stream=True, + ), + ) + serve_rpc_handlers[service_method] = serve_method_handler + generic_rpc_handlers[0]._method_handlers = serve_rpc_handlers + self.generic_rpc_handlers.append(generic_rpc_handlers) + super().add_generic_rpc_handlers(generic_rpc_handlers) + + +class DummyServicer: + """Dummy servicer for gRPC server to call on. + + This is a dummy class that just pass through when calling on any method. + User defined servicer function will attempt to add the method on this class to the + gRPC server, but our gRPC server will override the caller to call gRPCProxy. + """ + + def __getattr__(self, attr): + # No-op pass through. Just need this to act as the callable. + pass diff --git a/.venv/lib/python3.11/site-packages/ray/serve/_private/local_testing_mode.py b/.venv/lib/python3.11/site-packages/ray/serve/_private/local_testing_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..af38c04c5e656989dfafd0df0faffe1cd9e9e16d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/_private/local_testing_mode.py @@ -0,0 +1,319 @@ +import asyncio +import concurrent.futures +import inspect +import logging +import queue +import time +from functools import wraps +from typing import Any, Callable, Coroutine, Dict, Optional, Tuple, Union + +import ray +from ray import cloudpickle +from ray.serve._private.common import DeploymentID, RequestMetadata +from ray.serve._private.constants import ( + RAY_SERVE_RUN_SYNC_IN_THREADPOOL, + SERVE_LOGGER_NAME, +) +from ray.serve._private.replica import UserCallableWrapper +from ray.serve._private.replica_result import ReplicaResult +from ray.serve._private.router import Router +from ray.serve._private.utils import GENERATOR_COMPOSITION_NOT_SUPPORTED_ERROR +from ray.serve.deployment import Deployment +from ray.serve.exceptions import RequestCancelledError +from ray.serve.handle import ( + DeploymentHandle, + DeploymentResponse, + DeploymentResponseGenerator, +) + +logger = logging.getLogger(SERVE_LOGGER_NAME) + + +def _validate_deployment_options( + deployment: Deployment, + deployment_id: DeploymentID, +): + if "num_gpus" in deployment.ray_actor_options: + logger.warning( + f"Deployment {deployment_id} has num_gpus configured. " + "CUDA_VISIBLE_DEVICES is not managed automatically in local testing mode. " + ) + + if "runtime_env" in deployment.ray_actor_options: + logger.warning( + f"Deployment {deployment_id} has runtime_env configured. " + "runtime_envs are ignored in local testing mode." + ) + + +def make_local_deployment_handle( + deployment: Deployment, + app_name: str, +) -> DeploymentHandle: + """Constructs an in-process DeploymentHandle. 
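A few lines up, DummyServicer exists only so user-defined servicer registration functions have something to call on; every attribute lookup succeeds because __getattr__ swallows the name. A standalone sketch of the catch-all idea; unlike the vendored class, which returns None from the lookup, this variant hands back a callable no-op so the attribute can actually be invoked:

    class CatchAll:
        def __getattr__(self, attr):
            # Only invoked when normal attribute lookup fails, i.e. for
            # any name not defined on the class.
            def _noop(*args, **kwargs):
                return None
            return _noop

    c = CatchAll()
    c.AnyServiceMethod("payload")  # no AttributeError; returns None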
+ + This is used in the application build process for local testing mode, + where all deployments of an app run in the local process which enables + faster dev iterations and use of tooling like PDB. + + The user callable will be run on an asyncio loop in a separate thread + (sharing the same code that's used in the replica). + + The constructor for the user callable is run eagerly in this function to + ensure that any exceptions are raised during `serve.run`. + """ + deployment_id = DeploymentID(deployment.name, app_name) + _validate_deployment_options(deployment, deployment_id) + user_callable_wrapper = UserCallableWrapper( + deployment.func_or_class, + deployment.init_args, + deployment.init_kwargs, + deployment_id=deployment_id, + run_sync_methods_in_threadpool=RAY_SERVE_RUN_SYNC_IN_THREADPOOL, + ) + try: + logger.info(f"Initializing local replica class for {deployment_id}.") + user_callable_wrapper.initialize_callable().result() + except Exception: + logger.exception(f"Failed to initialize deployment {deployment_id}.") + raise + + def _create_local_router( + handle_id: str, deployment_id: DeploymentID, handle_options: Any + ) -> Router: + return LocalRouter( + user_callable_wrapper, + deployment_id=deployment_id, + handle_options=handle_options, + ) + + return DeploymentHandle( + deployment.name, + app_name, + _create_router=_create_local_router, + ) + + +class LocalReplicaResult(ReplicaResult): + """ReplicaResult used by in-process Deployment Handles.""" + + OBJ_REF_NOT_SUPPORTED_ERROR = RuntimeError( + "Converting DeploymentResponses to ObjectRefs is not supported " + "in local testing mode." + ) + + def __init__( + self, + future: concurrent.futures.Future, + *, + request_id: str, + is_streaming: bool = False, + generator_result_queue: Optional[queue.Queue] = None, + ): + self._future = future + self._lazy_asyncio_future = None + self._request_id = request_id + self._is_streaming = is_streaming + + # For streaming requests, results must be written to this queue. + # The queue will be consumed until the future is completed. + self._generator_result_queue = generator_result_queue + if self._is_streaming: + assert ( + self._generator_result_queue is not None + ), "generator_result_queue must be provided for streaming results." + + @property + def _asyncio_future(self) -> asyncio.Future: + if self._lazy_asyncio_future is None: + self._lazy_asyncio_future = asyncio.wrap_future(self._future) + + return self._lazy_asyncio_future + + def _process_response(f: Union[Callable, Coroutine]): + @wraps(f) + def wrapper(self, *args, **kwargs): + try: + return f(self, *args, **kwargs) + except (asyncio.CancelledError, concurrent.futures.CancelledError): + raise RequestCancelledError(self._request_id) + + @wraps(f) + async def async_wrapper(self, *args, **kwargs): + try: + return await f(self, *args, **kwargs) + except (asyncio.CancelledError, concurrent.futures.CancelledError): + raise RequestCancelledError(self._request_id) + + if inspect.iscoroutinefunction(f): + return async_wrapper + else: + return wrapper + + @_process_response + def get(self, timeout_s: Optional[float]): + assert ( + not self._is_streaming + ), "get() can only be called on a non-streaming result." + + try: + return self._future.result(timeout=timeout_s) + except concurrent.futures.TimeoutError: + raise TimeoutError("Timed out waiting for result.") + + @_process_response + async def get_async(self): + assert ( + not self._is_streaming + ), "get_async() can only be called on a non-streaming result." 
+ + return await self._asyncio_future + + @_process_response + def __next__(self): + assert self._is_streaming, "next() can only be called on a streaming result." + + while True: + if self._future.done() and self._generator_result_queue.empty(): + if self._future.exception(): + raise self._future.exception() + else: + raise StopIteration + + try: + return self._generator_result_queue.get(timeout=0.01) + except queue.Empty: + pass + + @_process_response + async def __anext__(self): + assert self._is_streaming, "anext() can only be called on a streaming result." + + # This callback does not pull from the queue, only checks that a result is + # available, else there is a race condition where the future finishes and the + # queue is empty, but this function hasn't returned the result yet. + def _wait_for_result(): + while True: + if self._future.done() or not self._generator_result_queue.empty(): + return + time.sleep(0.01) + + wait_for_result_task = asyncio.get_running_loop().create_task( + asyncio.to_thread(_wait_for_result), + ) + done, _ = await asyncio.wait( + [self._asyncio_future, wait_for_result_task], + return_when=asyncio.FIRST_COMPLETED, + ) + + if not self._generator_result_queue.empty(): + return self._generator_result_queue.get() + + if self._asyncio_future.exception(): + raise self._asyncio_future.exception() + + raise StopAsyncIteration + + def add_done_callback(self, callback: Callable): + self._future.add_done_callback(callback) + + def cancel(self): + self._future.cancel() + + def to_object_ref(self, timeout_s: Optional[float]) -> ray.ObjectRef: + raise self.OBJ_REF_NOT_SUPPORTED_ERROR + + async def to_object_ref_async(self) -> ray.ObjectRef: + raise self.OBJ_REF_NOT_SUPPORTED_ERROR + + def to_object_ref_gen(self) -> ray.ObjectRefGenerator: + raise self.OBJ_REF_NOT_SUPPORTED_ERROR + + +class LocalRouter(Router): + def __init__( + self, + user_callable_wrapper: UserCallableWrapper, + deployment_id: DeploymentID, + handle_options: Any, + ): + self._deployment_id = deployment_id + self._user_callable_wrapper = user_callable_wrapper + assert ( + self._user_callable_wrapper._callable is not None + ), "User callable must already be initialized." + + def running_replicas_populated(self) -> bool: + return True + + def _resolve_deployment_responses( + self, request_args: Tuple[Any], request_kwargs: Dict[str, Any] + ) -> Tuple[Tuple[Any], Dict[str, Any]]: + """Replace DeploymentResponse objects with their results. + + NOTE(edoakes): this currently calls the blocking `.result()` method + on the responses to resolve them to their values. This is a divergence + from the remote codepath where they're resolved concurrently. + """ + + def _new_arg(arg: Any) -> Any: + if isinstance(arg, DeploymentResponse): + new_arg = arg.result(_skip_asyncio_check=True) + elif isinstance(arg, DeploymentResponseGenerator): + raise GENERATOR_COMPOSITION_NOT_SUPPORTED_ERROR + else: + new_arg = arg + + return new_arg + + # Serialize and deserialize the arguments to mimic remote call behavior. 
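_resolve_deployment_responses round-trips arguments through cloudpickle so that local-mode calls get the same copy semantics as remote calls: the callee never shares mutable objects with the caller. A minimal sketch of the trick, assuming Ray is installed (the vendored module imports cloudpickle the same way); plain stdlib pickle would demonstrate the same isolation:

    from ray import cloudpickle

    def copy_like_a_remote_call(args: tuple, kwargs: dict):
        # A remote call never shares objects with the caller; forcing a
        # serialize/deserialize round-trip locally reproduces that isolation.
        return cloudpickle.loads(cloudpickle.dumps((args, kwargs)))

    payload = {"xs": [1, 2, 3]}
    new_args, new_kwargs = copy_like_a_remote_call((payload,), {})
    new_args[0]["xs"].append(4)
    assert payload["xs"] == [1, 2, 3]  # the caller's object is untouched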
+ return cloudpickle.loads( + cloudpickle.dumps( + ( + tuple(_new_arg(arg) for arg in request_args), + {k: _new_arg(v) for k, v in request_kwargs.items()}, + ) + ) + ) + + def assign_request( + self, + request_meta: RequestMetadata, + *request_args, + **request_kwargs, + ) -> concurrent.futures.Future[LocalReplicaResult]: + request_args, request_kwargs = self._resolve_deployment_responses( + request_args, request_kwargs + ) + + if request_meta.is_streaming: + generator_result_queue = queue.Queue() + + def generator_result_callback(item: Any): + generator_result_queue.put_nowait(item) + + else: + generator_result_queue = None + generator_result_callback = None + + # Conform to the router interface of returning a future to the ReplicaResult. + noop_future = concurrent.futures.Future() + noop_future.set_result( + LocalReplicaResult( + self._user_callable_wrapper.call_user_method( + request_meta, + request_args, + request_kwargs, + generator_result_callback=generator_result_callback, + ), + request_id=request_meta.request_id, + is_streaming=request_meta.is_streaming, + generator_result_queue=generator_result_queue, + ) + ) + return noop_future + + def shutdown(self): + noop_future = concurrent.futures.Future() + noop_future.set_result(None) + return noop_future diff --git a/.venv/lib/python3.11/site-packages/ray/serve/_private/proxy_router.py b/.venv/lib/python3.11/site-packages/ray/serve/_private/proxy_router.py new file mode 100644 index 0000000000000000000000000000000000000000..7c40cee8b418225859c77adcd41703e5c1440608 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/_private/proxy_router.py @@ -0,0 +1,165 @@ +import logging +from typing import Callable, Dict, List, Optional, Tuple + +from ray.serve._private.common import ApplicationName, DeploymentID, EndpointInfo +from ray.serve._private.constants import SERVE_LOGGER_NAME +from ray.serve.handle import DeploymentHandle + +logger = logging.getLogger(SERVE_LOGGER_NAME) + +NO_ROUTES_MESSAGE = "Route table is not populated yet." +NO_REPLICAS_MESSAGE = "No replicas are available yet." + + +class ProxyRouter: + """Router interface for the proxy to use.""" + + def __init__( + self, + get_handle: Callable[[str, str], DeploymentHandle], + ): + # Function to get a handle given a name. Used to mock for testing. + self._get_handle = get_handle + # Contains a ServeHandle for each endpoint. + self.handles: Dict[DeploymentID, DeploymentHandle] = dict() + # Flipped to `True` once the route table has been updated at least once. + # The proxy router is not ready for traffic until the route table is populated + self._route_table_populated = False + + # Info used for HTTP proxy + # Routes sorted in order of decreasing length. + self.sorted_routes: List[str] = list() + # Endpoints associated with the routes. + self.route_info: Dict[str, DeploymentID] = dict() + # Map of application name to is_cross_language. + self.app_to_is_cross_language: Dict[ApplicationName, bool] = dict() + + # Info used for gRPC proxy + # Endpoints info associated with endpoints. + self.endpoints: Dict[DeploymentID, EndpointInfo] = dict() + + def ready_for_traffic(self, is_head: bool) -> Tuple[bool, str]: + """Whether the proxy router is ready to serve traffic. + + The first return value will be false if any of the following hold: + - The route table has not been populated yet with a non-empty set of routes + - The route table has been populated, but none of the handles + have received running replicas yet AND it lives on a worker node. 
+ + Otherwise, the first return value will be true. + """ + + if not self._route_table_populated: + return False, NO_ROUTES_MESSAGE + + # NOTE(zcin): For the proxy on the head node, even if none of its handles have + # been populated with running replicas yet, we MUST mark the proxy as ready for + # traffic. This is to handle the case when all deployments have scaled to zero. + # If the deployments (more precisely, ingress deployments) have all scaled down + # to zero, at least one proxy needs to be able to receive incoming requests to + # trigger upscale. + if is_head: + return True, "" + + for handle in self.handles.values(): + if handle.running_replicas_populated(): + return True, "" + + return False, NO_REPLICAS_MESSAGE + + def update_routes(self, endpoints: Dict[DeploymentID, EndpointInfo]): + logger.info( + f"Got updated endpoints: {endpoints}.", extra={"log_to_stderr": True} + ) + if endpoints: + self._route_table_populated = True + + self.endpoints = endpoints + + existing_handles = set(self.handles.keys()) + routes = [] + route_info = {} + app_to_is_cross_language = {} + for endpoint, info in endpoints.items(): + routes.append(info.route) + route_info[info.route] = endpoint + app_to_is_cross_language[endpoint.app_name] = info.app_is_cross_language + if endpoint in self.handles: + existing_handles.remove(endpoint) + else: + self.handles[endpoint] = self._get_handle(endpoint, info) + + # Clean up any handles that are no longer used. + if len(existing_handles) > 0: + logger.info( + f"Deleting {len(existing_handles)} unused handles.", + extra={"log_to_stderr": False}, + ) + for endpoint in existing_handles: + del self.handles[endpoint] + + # Routes are sorted in order of decreasing length to enable longest + # prefix matching. + self.sorted_routes = sorted(routes, key=lambda x: len(x), reverse=True) + self.route_info = route_info + self.app_to_is_cross_language = app_to_is_cross_language + + def match_route( + self, target_route: str + ) -> Optional[Tuple[str, DeploymentHandle, bool]]: + """Return the longest prefix match among existing routes for the route. + Args: + target_route: route to match against. + Returns: + (route, handle, is_cross_language) if found, else None. + """ + + for route in self.sorted_routes: + if target_route.startswith(route): + matched = False + # If the route we matched on ends in a '/', then so does the + # target route and this must be a match. + if route.endswith("/"): + matched = True + # If the route we matched on doesn't end in a '/', we need to + # do another check to ensure that either this is an exact match + # or the next character in the target route is a '/'. This is + # to guard against the scenario where we have '/route' as a + # prefix and there's a request to '/routesuffix'. In this case, + # it should *not* be a match. + elif len(target_route) == len(route) or target_route[len(route)] == "/": + matched = True + + if matched: + endpoint = self.route_info[route] + return ( + route, + self.handles[endpoint], + self.app_to_is_cross_language[endpoint.app_name], + ) + + return None + + def get_handle_for_endpoint( + self, target_app_name: str + ) -> Optional[Tuple[str, DeploymentHandle, bool]]: + """Return the handle that matches with endpoint. + + Args: + target_app_name: app_name to match against. + Returns: + (route, handle, is_cross_language) for the single app if there + is only one, else find the app and handle for exact match. Else return None. 
+ """ + for endpoint_tag, handle in self.handles.items(): + # If the target_app_name matches with the endpoint or if + # there is only one endpoint. + if target_app_name == endpoint_tag.app_name or len(self.handles) == 1: + endpoint_info = self.endpoints[endpoint_tag] + return ( + endpoint_info.route, + handle, + endpoint_info.app_is_cross_language, + ) + + return None diff --git a/.venv/lib/python3.11/site-packages/ray/serve/_private/proxy_state.py b/.venv/lib/python3.11/site-packages/ray/serve/_private/proxy_state.py new file mode 100644 index 0000000000000000000000000000000000000000..3325bbe0ee10c13185c5cda5d6e3117752838eb4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/_private/proxy_state.py @@ -0,0 +1,789 @@ +import asyncio +import json +import logging +import os +from abc import ABC, abstractmethod +from typing import Dict, List, Optional, Set, Tuple, Type + +import ray +from ray import ObjectRef +from ray.actor import ActorHandle +from ray.exceptions import RayActorError +from ray.serve._private.cluster_node_info_cache import ClusterNodeInfoCache +from ray.serve._private.common import NodeId +from ray.serve._private.constants import ( + ASYNC_CONCURRENCY, + PROXY_DRAIN_CHECK_PERIOD_S, + PROXY_HEALTH_CHECK_PERIOD_S, + PROXY_HEALTH_CHECK_TIMEOUT_S, + PROXY_HEALTH_CHECK_UNHEALTHY_THRESHOLD, + PROXY_READY_CHECK_TIMEOUT_S, + RAY_SERVE_ALWAYS_RUN_PROXY_ON_HEAD_NODE, + RAY_SERVE_ENABLE_TASK_EVENTS, + SERVE_LOGGER_NAME, + SERVE_NAMESPACE, + SERVE_PROXY_NAME, +) +from ray.serve._private.proxy import ProxyActor +from ray.serve._private.utils import Timer, TimerBase, format_actor_name +from ray.serve.config import DeploymentMode, HTTPOptions, gRPCOptions +from ray.serve.schema import LoggingConfig, ProxyDetails, ProxyStatus +from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy + +logger = logging.getLogger(SERVE_LOGGER_NAME) + + +class ProxyWrapper(ABC): + @property + @abstractmethod + def actor_id(self) -> str: + """Return the actor id of the proxy actor.""" + raise NotImplementedError + + @abstractmethod + def is_ready(self, timeout_s: float) -> Optional[bool]: + """Return whether proxy is ready to be serving requests. + + Since actual readiness check is asynchronous, this method could return + any of the following statuses: + - None: Readiness check is pending + - True: Readiness check completed successfully (proxy is ready) + - False: Readiness check completed with failure (either timing out + or failing) + """ + raise NotImplementedError + + @abstractmethod + def is_healthy(self, timeout_s: float) -> Optional[bool]: + """Return whether the proxy actor is healthy. + + Since actual health-check is asynchronous, this method could return + either of the following statuses: + - None: Health-check is pending + - True: Health-check completed successfully (proxy is healthy) + - False: Health-check completed with failure (either timing out or failing) + """ + raise NotImplementedError + + @abstractmethod + def is_drained(self, timeout_s: float) -> Optional[bool]: + """Return whether the proxy actor is drained. 
+ + Since actual check whether proxy is drained is asynchronous, this method could + return either of the following statuses: + - None: Drain-check is pending + - True: Drain-check completed, node *is drained* + - False: Drain-check completed, node is *NOT* drained + """ + raise NotImplementedError + + @abstractmethod + def is_shutdown(self): + """Return whether the proxy actor is shutdown.""" + raise NotImplementedError + + @abstractmethod + def update_draining(self, draining: bool): + """Update the draining status of the proxy actor.""" + raise NotImplementedError + + @abstractmethod + def kill(self): + """Kill the proxy actor.""" + raise NotImplementedError + + +class ActorProxyWrapper(ProxyWrapper): + def __init__( + self, + logging_config: LoggingConfig, + actor_handle: Optional[ActorHandle] = None, + config: Optional[HTTPOptions] = None, + grpc_options: Optional[gRPCOptions] = None, + name: Optional[str] = None, + node_id: Optional[str] = None, + node_ip_address: Optional[str] = None, + port: Optional[int] = None, + proxy_actor_class: Type[ProxyActor] = ProxyActor, + ): + # initialize with provided proxy actor handle or get or create a new one. + self._actor_handle = actor_handle or self._get_or_create_proxy_actor( + config=config, + grpc_options=grpc_options, + name=name, + node_id=node_id, + node_ip_address=node_ip_address, + port=port, + proxy_actor_class=proxy_actor_class, + logging_config=logging_config, + ) + self._ready_check_future = None + self._health_check_future = None + self._drained_check_future = None + + self._update_draining_obj_ref = None + + self._node_id = node_id + + self.worker_id = None + self.log_file_path = None + + @staticmethod + def _get_or_create_proxy_actor( + config: HTTPOptions, + grpc_options: gRPCOptions, + name: str, + node_id: str, + node_ip_address: str, + port: int, + logging_config: LoggingConfig, + proxy_actor_class: Type[ProxyActor] = ProxyActor, + ) -> ProxyWrapper: + """Helper to start or reuse existing proxy. + + Takes the name of the proxy, the node id, and the node ip address, and look up + or creates a new ProxyActor actor handle for the proxy. + """ + proxy = None + try: + proxy = ray.get_actor(name, namespace=SERVE_NAMESPACE) + except ValueError: + logger.info( + f"Starting proxy on node '{node_id}' " + f"listening on '{config.host}:{port}'.", + extra={"log_to_stderr": False}, + ) + + proxy = proxy or proxy_actor_class.options( + num_cpus=config.num_cpus, + name=name, + namespace=SERVE_NAMESPACE, + lifetime="detached", + max_concurrency=ASYNC_CONCURRENCY, + max_restarts=0, + scheduling_strategy=NodeAffinitySchedulingStrategy(node_id, soft=False), + enable_task_events=RAY_SERVE_ENABLE_TASK_EVENTS, + ).remote( + config.host, + port, + config.root_path, + node_ip_address=node_ip_address, + node_id=node_id, + http_middlewares=config.middlewares, + request_timeout_s=config.request_timeout_s, + keep_alive_timeout_s=config.keep_alive_timeout_s, + grpc_options=grpc_options, + logging_config=logging_config, + ) + return proxy + + @property + def actor_id(self) -> str: + """Return the actor id of the proxy actor.""" + return self._actor_handle._actor_id.hex() + + @property + def actor_handle(self) -> ActorHandle: + """Return the actor handle of the proxy actor. + + This is used in _start_controller() in _private/controller.py to check whether + the proxies exist. It is also used in some tests to access proxy's actor handle. 
+ """ + return self._actor_handle + + def is_ready(self, timeout_s: float) -> Optional[bool]: + if self._ready_check_future is None: + self._ready_check_future = wrap_as_future( + self._actor_handle.ready.remote(), timeout_s=timeout_s + ) + + if not self._ready_check_future.done(): + return None + + try: + worker_id, log_file_path = json.loads(self._ready_check_future.result()) + self.worker_id = worker_id + self.log_file_path = log_file_path + return True + except TimeoutError: + logger.warning( + f"Proxy actor readiness check for proxy on {self._node_id}" + f" didn't complete in {timeout_s}s." + ) + except Exception: + logger.exception( + f"Unexpected error invoking readiness check for proxy" + f" on {self._node_id}", + ) + finally: + self._ready_check_future = None + + return False + + def is_healthy(self, timeout_s: float) -> Optional[bool]: + if self._health_check_future is None: + self._health_check_future = wrap_as_future( + self._actor_handle.check_health.remote(), timeout_s=timeout_s + ) + + if not self._health_check_future.done(): + return None + + try: + # NOTE: Since `check_health` method is responding with nothing, sole + # purpose of fetching the result is to extract any potential + # exceptions + self._health_check_future.result() + return True + except TimeoutError: + logger.warning( + f"Didn't receive health check response for proxy" + f" on {self._node_id} after {timeout_s}s." + ) + except Exception: + logger.exception( + f"Unexpected error invoking health check for proxy " + f"on {self._node_id}", + ) + finally: + self._health_check_future = None + + return False + + def is_drained(self, timeout_s: float) -> Optional[bool]: + if self._drained_check_future is None: + self._drained_check_future = wrap_as_future( + self._actor_handle.is_drained.remote(), + timeout_s=timeout_s, + ) + + if not self._drained_check_future.done(): + return None + + try: + is_drained = self._drained_check_future.result() + return is_drained + except TimeoutError: + logger.warning( + f"Didn't receive drain check response for proxy" + f" on {self._node_id} after {timeout_s}s." + ) + except Exception: + logger.exception( + f"Unexpected error invoking drain-check for proxy " + f"on {self._node_id}", + ) + finally: + self._drained_check_future = None + + return False + + def is_shutdown(self) -> bool: + """Return whether the proxy actor is shutdown. + + If the actor is dead, the health check will return RayActorError. + """ + try: + ray.get(self._actor_handle.check_health.remote(), timeout=0) + except RayActorError: + # The actor is dead, so it's ready for shutdown. + return True + + # The actor is still alive, so it's not ready for shutdown. 
+ return False + + def update_draining(self, draining: bool): + """Update the draining status of the proxy actor.""" + # NOTE: All update_draining calls are implicitly serialized, by specifying + # `ObjectRef` of the previous call + self._update_draining_obj_ref = self._actor_handle.update_draining.remote( + draining, _after=self._update_draining_obj_ref + ) + # In case of cancelled draining, make sure pending draining check is cancelled + # as well + if not draining: + future = self._drained_check_future + self._drained_check_future = None + if future: + future.cancel() + + def kill(self): + """Kill the proxy actor.""" + ray.kill(self._actor_handle, no_restart=True) + + +class ProxyState: + def __init__( + self, + actor_proxy_wrapper: ProxyWrapper, + actor_name: str, + node_id: str, + node_ip: str, + proxy_restart_count: int = 0, + timer: TimerBase = Timer(), + ): + self._actor_proxy_wrapper = actor_proxy_wrapper + self._actor_name = actor_name + self._node_id = node_id + self._status = ProxyStatus.STARTING + self._timer = timer + self._shutting_down = False + self._consecutive_health_check_failures: int = 0 + self._proxy_restart_count = proxy_restart_count + self._last_health_check_time: Optional[float] = None + self._last_drain_check_time: Optional[float] = None + + self._actor_details = ProxyDetails( + node_id=node_id, + node_ip=node_ip, + actor_id=self._actor_proxy_wrapper.actor_id, + actor_name=self._actor_name, + status=self._status, + ) + + @property + def actor_handle(self) -> ActorHandle: + return self._actor_proxy_wrapper.actor_handle + + @property + def actor_name(self) -> str: + return self._actor_name + + @property + def actor_id(self) -> str: + return self._actor_proxy_wrapper.actor_id + + @property + def status(self) -> ProxyStatus: + return self._status + + @property + def actor_details(self) -> ProxyDetails: + return self._actor_details + + @property + def proxy_restart_count(self) -> int: + return self._proxy_restart_count + + def _set_status(self, status: ProxyStatus) -> None: + """Sets _status and updates _actor_details with the new status. + + NOTE: This method should not be used directly, instead please + use `try_update_status` method + """ + self._status = status + self.update_actor_details(status=self._status) + + def try_update_status(self, status: ProxyStatus): + """Try update with the new status and only update when the conditions are met. + + Status will only set to UNHEALTHY after PROXY_HEALTH_CHECK_UNHEALTHY_THRESHOLD + consecutive failures. A warning will be logged when the status is set to + UNHEALTHY. Also, when status is set to HEALTHY, we will reset + self._consecutive_health_check_failures to 0. + """ + + if status == ProxyStatus.UNHEALTHY: + self._consecutive_health_check_failures += 1 + # Early return to skip setting UNHEALTHY status if there are still room for + # retry. + if ( + self._consecutive_health_check_failures + < PROXY_HEALTH_CHECK_UNHEALTHY_THRESHOLD + ): + return + else: + # If all retries have been exhausted and setting the status to + # UNHEALTHY, log a warning message to the user. + logger.warning( + f"Proxy {self._actor_name} failed the health check " + f"{self._consecutive_health_check_failures} times in a row, marking" + f" it unhealthy." 
+ ) + else: + # Reset self._consecutive_health_check_failures when status is not + # UNHEALTHY + self._consecutive_health_check_failures = 0 + + self._set_status(status=status) + + def update_actor_details(self, **kwargs) -> None: + """Updates _actor_details with passed in kwargs.""" + details_kwargs = self._actor_details.dict() + details_kwargs.update(kwargs) + self._actor_details = ProxyDetails(**details_kwargs) + + def reconcile(self, draining: bool = False): + try: + self._reconcile_internal(draining) + except Exception as e: + self.try_update_status(ProxyStatus.UNHEALTHY) + logger.error( + "Unexpected error occurred when reconciling state of " + f"proxy on node {self._node_id}", + exc_info=e, + ) + + def _reconcile_internal(self, draining: bool): + """Update the status of the current proxy. + + The state machine is: + STARTING -> HEALTHY or UNHEALTHY + HEALTHY -> DRAINING or UNHEALTHY + DRAINING -> HEALTHY or UNHEALTHY or DRAINED + + UNHEALTHY is a terminal state; upon reaching it, the proxy will be + restarted by the controller + """ + if ( + self._shutting_down + or self._status == ProxyStatus.DRAINED + or self._status == ProxyStatus.UNHEALTHY + ): + return + + # Doing a linear backoff for the ready check timeout. + ready_check_timeout = ( + self.proxy_restart_count + 1 + ) * PROXY_READY_CHECK_TIMEOUT_S + + if self._status == ProxyStatus.STARTING: + is_ready_response = self._actor_proxy_wrapper.is_ready(ready_check_timeout) + if is_ready_response is not None: + if is_ready_response: + self.try_update_status(ProxyStatus.HEALTHY) + self.update_actor_details( + worker_id=self._actor_proxy_wrapper.worker_id, + log_file_path=self._actor_proxy_wrapper.log_file_path, + status=self._status, + ) + else: + self.try_update_status(ProxyStatus.UNHEALTHY) + logger.warning( + f"Proxy actor reported not ready on node {self._node_id}" + ) + else: + # At this point, the proxy is either in HEALTHY or DRAINING status.
+ assert self._status in {ProxyStatus.HEALTHY, ProxyStatus.DRAINING} + + should_check_health = self._last_health_check_time is None or ( + self._timer.time() - self._last_health_check_time + >= PROXY_HEALTH_CHECK_PERIOD_S + ) + # Perform health-check for proxy's actor (if necessary) + if should_check_health: + is_healthy_response = self._actor_proxy_wrapper.is_healthy( + PROXY_HEALTH_CHECK_TIMEOUT_S + ) + if is_healthy_response is not None: + if is_healthy_response: + # At this stage status is either HEALTHY or DRAINING, and here + # we simply reset the status + self.try_update_status(self._status) + else: + self.try_update_status(ProxyStatus.UNHEALTHY) + + self._last_health_check_time = self._timer.time() + + # Handle state transitions (if necessary) + if self._status == ProxyStatus.UNHEALTHY: + return + elif self._status == ProxyStatus.HEALTHY: + if draining: + logger.info(f"Draining proxy on node '{self._node_id}'.") + assert self._last_drain_check_time is None + + self._actor_proxy_wrapper.update_draining(draining=True) + self.try_update_status(ProxyStatus.DRAINING) + elif self._status == ProxyStatus.DRAINING: + if not draining: + logger.info(f"No longer draining proxy on node '{self._node_id}'.") + self._last_drain_check_time = None + + self._actor_proxy_wrapper.update_draining(draining=False) + self.try_update_status(ProxyStatus.HEALTHY) + else: + should_check_drain = self._last_drain_check_time is None or ( + self._timer.time() - self._last_drain_check_time + >= PROXY_DRAIN_CHECK_PERIOD_S + ) + if should_check_drain: + # NOTE: We use the same timeout as for readiness checking + is_drained_response = self._actor_proxy_wrapper.is_drained( + PROXY_READY_CHECK_TIMEOUT_S + ) + if is_drained_response is not None: + if is_drained_response: + self.try_update_status(ProxyStatus.DRAINED) + + self._last_drain_check_time = self._timer.time() + + def shutdown(self): + self._shutting_down = True + self._actor_proxy_wrapper.kill() + + def is_ready_for_shutdown(self) -> bool: + """Return whether the proxy actor is shutdown. + + For a proxy actor to be considered shutdown, it must be marked as + _shutting_down and the actor must be shut down. + """ + if not self._shutting_down: + return False + + return self._actor_proxy_wrapper.is_shutdown() + + +class ProxyStateManager: + """Manages all state for proxies in the system. + + This class is *not* thread safe, so any state-modifying methods should be + called with a lock held. 
+ """ + + def __init__( + self, + config: HTTPOptions, + head_node_id: str, + cluster_node_info_cache: ClusterNodeInfoCache, + logging_config: LoggingConfig, + grpc_options: Optional[gRPCOptions] = None, + proxy_actor_class: Type[ProxyActor] = ProxyActor, + actor_proxy_wrapper_class: Type[ProxyWrapper] = ActorProxyWrapper, + timer: TimerBase = Timer(), + ): + self.logging_config = logging_config + if config is not None: + self._config = config + else: + self._config = HTTPOptions() + self._grpc_options = grpc_options or gRPCOptions() + self._proxy_states: Dict[NodeId, ProxyState] = dict() + self._proxy_restart_counts: Dict[NodeId, int] = dict() + self._head_node_id: str = head_node_id + self._proxy_actor_class = proxy_actor_class + self._actor_proxy_wrapper_class = actor_proxy_wrapper_class + self._timer = timer + + self._cluster_node_info_cache = cluster_node_info_cache + + assert isinstance(head_node_id, str) + + def reconfigure_logging_config(self, logging_config: LoggingConfig): + self.logging_config = logging_config + + def shutdown(self) -> None: + for proxy_state in self._proxy_states.values(): + proxy_state.shutdown() + + def is_ready_for_shutdown(self) -> bool: + """Return whether all proxies are shutdown. + + Iterate through all proxy states and check if all their proxy actors + are shutdown. + """ + return all( + proxy_state.is_ready_for_shutdown() + for proxy_state in self._proxy_states.values() + ) + + def get_config(self) -> HTTPOptions: + return self._config + + def get_grpc_config(self) -> gRPCOptions: + return self._grpc_options + + def get_proxy_handles(self) -> Dict[NodeId, ActorHandle]: + return { + node_id: state.actor_handle for node_id, state in self._proxy_states.items() + } + + def get_proxy_names(self) -> Dict[NodeId, str]: + return { + node_id: state.actor_name for node_id, state in self._proxy_states.items() + } + + def get_proxy_details(self) -> Dict[NodeId, ProxyDetails]: + return { + node_id: state.actor_details + for node_id, state in self._proxy_states.items() + } + + def get_alive_proxy_actor_ids(self) -> Set[str]: + return {state.actor_id for state in self._proxy_states.values()} + + def update(self, proxy_nodes: Set[NodeId] = None) -> Set[str]: + """Update the state of all proxies. + + Start proxies on all nodes if not already exist and stop the proxies on nodes + that are no longer exist. Update all proxy states. Kill and restart + unhealthy proxies. + """ + if proxy_nodes is None: + proxy_nodes = set() + + # Ensure head node always has a proxy (unless FF'd off). 
+        if RAY_SERVE_ALWAYS_RUN_PROXY_ON_HEAD_NODE:
+            proxy_nodes.add(self._head_node_id)
+
+        target_nodes = self._get_target_nodes(proxy_nodes)
+        target_node_ids = {node_id for node_id, _ in target_nodes}
+
+        for node_id, proxy_state in self._proxy_states.items():
+            draining = node_id not in target_node_ids
+            proxy_state.reconcile(draining)
+
+        self._stop_proxies_if_needed()
+        self._start_proxies_if_needed(target_nodes)
+
+    def _get_target_nodes(self, proxy_nodes) -> List[Tuple[str, str]]:
+        """Return the list of (node_id, ip_address) to deploy HTTP and gRPC servers
+        on."""
+        location = self._config.location
+
+        if location == DeploymentMode.NoServer:
+            return []
+
+        target_nodes = [
+            (node_id, ip_address)
+            for node_id, ip_address in self._cluster_node_info_cache.get_alive_nodes()
+            if node_id in proxy_nodes
+        ]
+
+        if location == DeploymentMode.HeadOnly:
+            nodes = [
+                (node_id, ip_address)
+                for node_id, ip_address in target_nodes
+                if node_id == self._head_node_id
+            ]
+            assert len(nodes) == 1, (
+                f"Head node not found! Head node id: {self._head_node_id}, "
+                f"all nodes: {target_nodes}."
+            )
+            return nodes
+
+        return target_nodes
+
+    def _generate_actor_name(self, node_id: str) -> str:
+        return format_actor_name(SERVE_PROXY_NAME, node_id)
+
+    def _start_proxy(
+        self,
+        name: str,
+        node_id: str,
+        node_ip_address: str,
+    ) -> ProxyWrapper:
+        """Helper to start or reuse an existing proxy and wrap it in a proxy actor
+        wrapper.
+
+        Computes the HTTP port from the `TEST_WORKER_NODE_HTTP_PORT` env var and
+        the gRPC port from the `TEST_WORKER_NODE_GRPC_PORT` env var. Passes all
+        the required variables into the proxy actor wrapper class and returns the
+        proxy actor wrapper.
+        """
+        port = self._config.port
+        grpc_options = self._grpc_options
+
+        if (
+            node_id != self._head_node_id
+            and os.getenv("TEST_WORKER_NODE_HTTP_PORT") is not None
+        ):
+            logger.warning(
+                f"`TEST_WORKER_NODE_HTTP_PORT` env var is set. "
+                f"Using it for worker node {node_id}."
+            )
+            port = int(os.getenv("TEST_WORKER_NODE_HTTP_PORT"))
+
+        if (
+            node_id != self._head_node_id
+            and os.getenv("TEST_WORKER_NODE_GRPC_PORT") is not None
+        ):
+            logger.warning(
+                f"`TEST_WORKER_NODE_GRPC_PORT` env var is set. "
+                f"Using port {int(os.getenv('TEST_WORKER_NODE_GRPC_PORT'))} "
+                f"for worker node {node_id}."
+            )
+            grpc_options.port = int(os.getenv("TEST_WORKER_NODE_GRPC_PORT"))
+
+        return self._actor_proxy_wrapper_class(
+            logging_config=self.logging_config,
+            config=self._config,
+            grpc_options=grpc_options,
+            name=name,
+            node_id=node_id,
+            node_ip_address=node_ip_address,
+            port=port,
+            proxy_actor_class=self._proxy_actor_class,
+        )
+
+    def _start_proxies_if_needed(self, target_nodes) -> None:
+        """Start a proxy on every node if it doesn't already exist."""
+
+        for node_id, node_ip_address in target_nodes:
+            if node_id in self._proxy_states:
+                continue
+
+            name = self._generate_actor_name(node_id=node_id)
+            actor_proxy_wrapper = self._start_proxy(
+                name=name,
+                node_id=node_id,
+                node_ip_address=node_ip_address,
+            )
+
+            self._proxy_states[node_id] = ProxyState(
+                actor_proxy_wrapper=actor_proxy_wrapper,
+                actor_name=name,
+                node_id=node_id,
+                node_ip=node_ip_address,
+                proxy_restart_count=self._proxy_restart_counts.get(node_id, 0),
+                timer=self._timer,
+            )
+
+    def _stop_proxies_if_needed(self) -> None:
+        """Removes proxy actors.
+
+        Removes proxy actors on any nodes that no longer exist, as well as any
+        unhealthy or drained proxies.
+ """ + alive_node_ids = self._cluster_node_info_cache.get_alive_node_ids() + to_stop = [] + for node_id, proxy_state in self._proxy_states.items(): + if node_id not in alive_node_ids: + logger.info(f"Removing proxy on removed node '{node_id}'.") + to_stop.append(node_id) + elif proxy_state.status == ProxyStatus.UNHEALTHY: + logger.info( + f"Proxy on node '{node_id}' is unhealthy. Shutting down " + "the unhealthy proxy and starting a new one." + ) + to_stop.append(node_id) + elif proxy_state.status == ProxyStatus.DRAINED: + logger.info(f"Removing drained proxy on node '{node_id}'.") + to_stop.append(node_id) + + for node_id in to_stop: + proxy_state = self._proxy_states.pop(node_id) + self._proxy_restart_counts[node_id] = proxy_state.proxy_restart_count + 1 + proxy_state.shutdown() + + +def _try_set_exception(fut: asyncio.Future, e: Exception): + if not fut.done(): + fut.set_exception(e) + + +def wrap_as_future(ref: ObjectRef, timeout_s: Optional[float] = None) -> asyncio.Future: + loop = asyncio.get_running_loop() + + aio_fut = asyncio.wrap_future(ref.future()) + + if timeout_s is not None: + assert timeout_s >= 0, "Timeout value should be non-negative" + # Schedule handler to complete future exceptionally + timeout_handler = loop.call_later( + max(timeout_s, 0), + _try_set_exception, + aio_fut, + TimeoutError(f"Future cancelled after timeout {timeout_s}s"), + ) + # Cancel timeout handler upon completion of the future + aio_fut.add_done_callback(lambda _: timeout_handler.cancel()) + + return aio_fut diff --git a/.venv/lib/python3.11/site-packages/ray/serve/_private/replica.py b/.venv/lib/python3.11/site-packages/ray/serve/_private/replica.py new file mode 100644 index 0000000000000000000000000000000000000000..7d8c2e40467353fef0d619e2ea71ad7d2cf27c46 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/_private/replica.py @@ -0,0 +1,1682 @@ +import asyncio +import concurrent.futures +import functools +import inspect +import logging +import os +import pickle +import threading +import time +import traceback +import warnings +from abc import ABC, abstractmethod +from collections import defaultdict, deque +from contextlib import contextmanager +from dataclasses import dataclass +from importlib import import_module +from typing import ( + Any, + AsyncGenerator, + Callable, + Dict, + Generator, + Optional, + Tuple, + Union, +) + +import starlette.responses +from anyio import to_thread +from starlette.types import ASGIApp, Message + +import ray +from ray import cloudpickle +from ray._private.utils import get_or_create_event_loop +from ray.actor import ActorClass, ActorHandle +from ray.remote_function import RemoteFunction +from ray.serve import metrics +from ray.serve._private.common import ( + DeploymentID, + ReplicaID, + ReplicaQueueLengthInfo, + RequestMetadata, + ServeComponentType, + StreamingHTTPRequest, + gRPCRequest, +) +from ray.serve._private.config import DeploymentConfig +from ray.serve._private.constants import ( + DEFAULT_LATENCY_BUCKET_MS, + GRPC_CONTEXT_ARG_NAME, + HEALTH_CHECK_METHOD, + RAY_SERVE_COLLECT_AUTOSCALING_METRICS_ON_HANDLE, + RAY_SERVE_METRICS_EXPORT_INTERVAL_MS, + RAY_SERVE_REPLICA_AUTOSCALING_METRIC_RECORD_PERIOD_S, + RAY_SERVE_RUN_SYNC_IN_THREADPOOL, + RAY_SERVE_RUN_SYNC_IN_THREADPOOL_WARNING, + RECONFIGURE_METHOD, + SERVE_CONTROLLER_NAME, + SERVE_LOGGER_NAME, + SERVE_NAMESPACE, +) +from ray.serve._private.default_impl import create_replica_impl +from ray.serve._private.http_util import ( + ASGIAppReplicaWrapper, + ASGIArgs, + ASGIReceiveProxy, + 
MessageQueue, + Response, +) +from ray.serve._private.logging_utils import ( + access_log_msg, + configure_component_cpu_profiler, + configure_component_logger, + configure_component_memory_profiler, + get_component_logger_file_path, +) +from ray.serve._private.metrics_utils import InMemoryMetricsStore, MetricsPusher +from ray.serve._private.thirdparty.get_asgi_route_name import get_asgi_route_name +from ray.serve._private.utils import get_component_file_name # noqa: F401 +from ray.serve._private.utils import parse_import_path +from ray.serve._private.version import DeploymentVersion +from ray.serve.config import AutoscalingConfig +from ray.serve.deployment import Deployment +from ray.serve.exceptions import RayServeException +from ray.serve.schema import LoggingConfig + +logger = logging.getLogger(SERVE_LOGGER_NAME) + + +def _load_deployment_def_from_import_path(import_path: str) -> Callable: + module_name, attr_name = parse_import_path(import_path) + deployment_def = getattr(import_module(module_name), attr_name) + + # For ray or serve decorated class or function, strip to return + # original body. + if isinstance(deployment_def, RemoteFunction): + deployment_def = deployment_def._function + elif isinstance(deployment_def, ActorClass): + deployment_def = deployment_def.__ray_metadata__.modified_class + elif isinstance(deployment_def, Deployment): + logger.warning( + f'The import path "{import_path}" contains a ' + "decorated Serve deployment. The decorator's settings " + "are ignored when deploying via import path." + ) + deployment_def = deployment_def.func_or_class + + return deployment_def + + +class ReplicaMetricsManager: + """Manages metrics for the replica. + + A variety of metrics are managed: + - Fine-grained metrics are set for every request. + - Autoscaling statistics are periodically pushed to the controller. + - Queue length metrics are periodically recorded as user-facing gauges. + """ + + PUSH_METRICS_TO_CONTROLLER_TASK_NAME = "push_metrics_to_controller" + RECORD_METRICS_TASK_NAME = "record_metrics" + SET_REPLICA_REQUEST_METRIC_GAUGE_TASK_NAME = "set_replica_request_metric_gauge" + + def __init__( + self, + replica_id: ReplicaID, + event_loop: asyncio.BaseEventLoop, + autoscaling_config: Optional[AutoscalingConfig], + ): + self._replica_id = replica_id + self._metrics_pusher = MetricsPusher() + self._metrics_store = InMemoryMetricsStore() + self._autoscaling_config = autoscaling_config + self._controller_handle = ray.get_actor( + SERVE_CONTROLLER_NAME, namespace=SERVE_NAMESPACE + ) + self._num_ongoing_requests = 0 + + # If the interval is set to 0, eagerly sets all metrics. + self._cached_metrics_enabled = RAY_SERVE_METRICS_EXPORT_INTERVAL_MS != 0 + self._cached_metrics_interval_s = RAY_SERVE_METRICS_EXPORT_INTERVAL_MS / 1000 + + # Request counter (only set on replica startup). + self._restart_counter = metrics.Counter( + "serve_deployment_replica_starts", + description=( + "The number of times this replica has been restarted due to failure." + ), + ) + self._restart_counter.inc() + + # Per-request metrics. + self._request_counter = metrics.Counter( + "serve_deployment_request_counter", + description=( + "The number of queries that have been processed in this replica." + ), + tag_keys=("route",), + ) + if self._cached_metrics_enabled: + self._cached_request_counter = defaultdict(int) + + self._error_counter = metrics.Counter( + "serve_deployment_error_counter", + description=( + "The number of exceptions that have occurred in this replica." 
+ ), + tag_keys=("route",), + ) + if self._cached_metrics_enabled: + self._cached_error_counter = defaultdict(int) + + self._processing_latency_tracker = metrics.Histogram( + "serve_deployment_processing_latency_ms", + description="The latency for queries to be processed.", + boundaries=DEFAULT_LATENCY_BUCKET_MS, + tag_keys=("route",), + ) + if self._cached_metrics_enabled: + self._cached_latencies = defaultdict(deque) + + self._num_ongoing_requests_gauge = metrics.Gauge( + "serve_replica_processing_queries", + description="The current number of queries being processed.", + ) + + self.set_autoscaling_config(autoscaling_config) + + if self._cached_metrics_enabled: + event_loop.create_task(self._report_cached_metrics_forever()) + + def _report_cached_metrics(self): + for route, count in self._cached_request_counter.items(): + self._request_counter.inc(count, tags={"route": route}) + self._cached_request_counter.clear() + + for route, count in self._cached_error_counter.items(): + self._error_counter.inc(count, tags={"route": route}) + self._cached_error_counter.clear() + + for route, latencies in self._cached_latencies.items(): + for latency_ms in latencies: + self._processing_latency_tracker.observe( + latency_ms, tags={"route": route} + ) + self._cached_latencies.clear() + + self._num_ongoing_requests_gauge.set(self._num_ongoing_requests) + + async def _report_cached_metrics_forever(self): + assert self._cached_metrics_interval_s > 0 + + consecutive_errors = 0 + while True: + try: + await asyncio.sleep(self._cached_metrics_interval_s) + self._report_cached_metrics() + consecutive_errors = 0 + except Exception: + logger.exception("Unexpected error reporting metrics.") + + # Exponential backoff starting at 1s and capping at 10s. + backoff_time_s = min(10, 2**consecutive_errors) + consecutive_errors += 1 + await asyncio.sleep(backoff_time_s) + + async def shutdown(self): + """Stop periodic background tasks.""" + + await self._metrics_pusher.graceful_shutdown() + + def set_autoscaling_config(self, autoscaling_config: Optional[AutoscalingConfig]): + """Dynamically update autoscaling config.""" + + self._autoscaling_config = autoscaling_config + + if ( + not RAY_SERVE_COLLECT_AUTOSCALING_METRICS_ON_HANDLE + and self._autoscaling_config + ): + self._metrics_pusher.start() + + # Push autoscaling metrics to the controller periodically. + self._metrics_pusher.register_or_update_task( + self.PUSH_METRICS_TO_CONTROLLER_TASK_NAME, + self._push_autoscaling_metrics, + self._autoscaling_config.metrics_interval_s, + ) + # Collect autoscaling metrics locally periodically. 
+        self._metrics_pusher.register_or_update_task(
+            self.RECORD_METRICS_TASK_NAME,
+            self._add_autoscaling_metrics_point,
+            min(
+                RAY_SERVE_REPLICA_AUTOSCALING_METRIC_RECORD_PERIOD_S,
+                self._autoscaling_config.metrics_interval_s,
+            ),
+        )
+
+    def inc_num_ongoing_requests(self) -> None:
+        """Increment the current total queue length of requests for this replica."""
+        self._num_ongoing_requests += 1
+        if not self._cached_metrics_enabled:
+            self._num_ongoing_requests_gauge.set(self._num_ongoing_requests)
+
+    def dec_num_ongoing_requests(self) -> None:
+        """Decrement the current total queue length of requests for this replica."""
+        self._num_ongoing_requests -= 1
+        if not self._cached_metrics_enabled:
+            self._num_ongoing_requests_gauge.set(self._num_ongoing_requests)
+
+    def get_num_ongoing_requests(self) -> int:
+        """Get current total queue length of requests for this replica."""
+        return self._num_ongoing_requests
+
+    def record_request_metrics(self, *, route: str, latency_ms: float, was_error: bool):
+        """Records per-request metrics."""
+        if self._cached_metrics_enabled:
+            self._cached_latencies[route].append(latency_ms)
+            if was_error:
+                self._cached_error_counter[route] += 1
+            else:
+                self._cached_request_counter[route] += 1
+        else:
+            self._processing_latency_tracker.observe(latency_ms, tags={"route": route})
+            if was_error:
+                self._error_counter.inc(tags={"route": route})
+            else:
+                self._request_counter.inc(tags={"route": route})
+
+    def _push_autoscaling_metrics(self) -> None:
+        look_back_period = self._autoscaling_config.look_back_period_s
+        self._controller_handle.record_autoscaling_metrics.remote(
+            replica_id=self._replica_id,
+            window_avg=self._metrics_store.window_average(
+                self._replica_id, time.time() - look_back_period
+            ),
+            send_timestamp=time.time(),
+        )
+
+    def _add_autoscaling_metrics_point(self) -> None:
+        self._metrics_store.add_metrics_point(
+            {self._replica_id: self._num_ongoing_requests},
+            time.time(),
+        )
+
+
+StatusCodeCallback = Callable[[str], None]
+
+
+class ReplicaBase(ABC):
+    def __init__(
+        self,
+        replica_id: ReplicaID,
+        deployment_def: Callable,
+        init_args: Tuple,
+        init_kwargs: Dict,
+        deployment_config: DeploymentConfig,
+        version: DeploymentVersion,
+    ):
+        self._version = version
+        self._replica_id = replica_id
+        self._deployment_id = replica_id.deployment_id
+        self._deployment_config = deployment_config
+        self._component_name = f"{self._deployment_id.name}"
+        if self._deployment_id.app_name:
+            self._component_name = (
+                f"{self._deployment_id.app_name}_" + self._component_name
+            )
+
+        self._component_id = self._replica_id.unique_id
+        self._configure_logger_and_profilers(self._deployment_config.logging_config)
+        self._event_loop = get_or_create_event_loop()
+
+        self._user_callable_wrapper = UserCallableWrapper(
+            deployment_def,
+            init_args,
+            init_kwargs,
+            deployment_id=self._deployment_id,
+            run_sync_methods_in_threadpool=RAY_SERVE_RUN_SYNC_IN_THREADPOOL,
+        )
+
+        # Guards against calling the user's callable constructor multiple times.
+        self._user_callable_initialized = False
+        self._user_callable_initialized_lock = asyncio.Lock()
+        self._initialization_latency: Optional[float] = None
+
+        # Flipped to `True` once graceful shutdown is initiated. May be used by replica
+        # subclass implementations.
+        self._shutting_down = False
+
+        # Will be populated with the wrapped ASGI app if the user callable is an
+        # `ASGIAppReplicaWrapper` (i.e., they are using the FastAPI integration).
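+        # Illustrative sketch (comments only; names are assumptions): a
+        # deployment that produces an `ASGIAppReplicaWrapper` callable via the
+        # FastAPI integration looks roughly like:
+        #
+        #   app = FastAPI()
+        #
+        #   @serve.deployment
+        #   @serve.ingress(app)
+        #   class MyIngress:
+        #       @app.get("/")
+        #       def root(self):
+        #           return "ok"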
+        self._user_callable_asgi_app: Optional[ASGIApp] = None
+
+        # Set metadata for logs and metrics.
+        # servable_object will be populated in `initialize_and_get_metadata`.
+        self._set_internal_replica_context(servable_object=None)
+
+        self._metrics_manager = ReplicaMetricsManager(
+            replica_id,
+            self._event_loop,
+            self._deployment_config.autoscaling_config,
+        )
+
+        self._port: Optional[int] = None
+
+    def _set_internal_replica_context(self, *, servable_object: Optional[Callable] = None):
+        ray.serve.context._set_internal_replica_context(
+            replica_id=self._replica_id,
+            servable_object=servable_object,
+            _deployment_config=self._deployment_config,
+        )
+
+    def _configure_logger_and_profilers(
+        self, logging_config: Union[None, Dict, LoggingConfig]
+    ):
+        if logging_config is None:
+            logging_config = {}
+        if isinstance(logging_config, dict):
+            logging_config = LoggingConfig(**logging_config)
+
+        configure_component_logger(
+            component_type=ServeComponentType.REPLICA,
+            component_name=self._component_name,
+            component_id=self._component_id,
+            logging_config=logging_config,
+        )
+        configure_component_memory_profiler(
+            component_type=ServeComponentType.REPLICA,
+            component_name=self._component_name,
+            component_id=self._component_id,
+        )
+        self.cpu_profiler, self.cpu_profiler_log = configure_component_cpu_profiler(
+            component_type=ServeComponentType.REPLICA,
+            component_name=self._component_name,
+            component_id=self._component_id,
+        )
+
+    def get_num_ongoing_requests(self):
+        return self._metrics_manager.get_num_ongoing_requests()
+
+    def _maybe_get_http_route(
+        self, request_metadata: RequestMetadata, request_args: Tuple[Any]
+    ) -> Optional[str]:
+        """Get the matched route string for ASGI apps to be used in logs & metrics.
+
+        If this replica does not wrap an ASGI app or there is no matching route
+        for the request, returns the existing route from the request metadata.
+        """
+        route = request_metadata.route
+        if (
+            request_metadata.is_http_request
+            and self._user_callable_asgi_app is not None
+        ):
+            req: StreamingHTTPRequest = request_args[0]
+            try:
+                matched_route = get_asgi_route_name(
+                    self._user_callable_asgi_app, req.asgi_scope
+                )
+            except Exception:
+                matched_route = None
+                logger.exception(
+                    "Failed unexpectedly trying to get route name for request. "
+                    "Routes in metric tags and log messages may be inaccurate. "
+                    "Please file a GitHub issue containing this traceback."
+                )
+
+            # If there is no match in the ASGI app, don't overwrite the route_prefix
+            # from the proxy.
+            if matched_route is not None:
+                route = matched_route
+
+        return route
+
+    def _maybe_get_http_method(
+        self, request_metadata: RequestMetadata, request_args: Tuple[Any]
+    ) -> Optional[str]:
+        """Get the HTTP method to be used in logs & metrics.
+
+        If this is not an HTTP request, returns None.
+        """
+        if request_metadata.is_http_request:
+            req: StreamingHTTPRequest = request_args[0]
+            # WebSocket messages don't have a 'method' field.
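+            # For reference (sketch of the ASGI spec, not Serve-specific): an
+            # HTTP scope looks roughly like
+            #   {"type": "http", "method": "GET", "path": "/", ...}
+            # while a WebSocket scope is {"type": "websocket", ...} with no
+            # "method" key, hence the "WS" fallback below.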
+ return req.asgi_scope.get("method", "WS") + + return None + + @contextmanager + def _handle_errors_and_metrics( + self, request_metadata: RequestMetadata, request_args: Tuple[Any] + ) -> Generator[StatusCodeCallback, None, None]: + start_time = time.time() + user_exception = None + + status_code = None + + def _status_code_callback(s: str): + nonlocal status_code + status_code = s + + try: + self._metrics_manager.inc_num_ongoing_requests() + yield _status_code_callback + except asyncio.CancelledError as e: + user_exception = e + self._on_request_cancelled(request_metadata, e) + except Exception as e: + user_exception = e + logger.exception("Request failed.") + self._on_request_failed(request_metadata, e) + finally: + self._metrics_manager.dec_num_ongoing_requests() + + latency_ms = (time.time() - start_time) * 1000 + if user_exception is None: + status_str = "OK" + elif isinstance(user_exception, asyncio.CancelledError): + status_str = "CANCELLED" + else: + status_str = "ERROR" + + http_method = self._maybe_get_http_method(request_metadata, request_args) + http_route = request_metadata.route + # Set in _wrap_user_method_call. + logger.info( + access_log_msg( + method=http_method or "CALL", + route=http_route or request_metadata.call_method, + # Prefer the HTTP status code if it was populated. + status=status_code or status_str, + latency_ms=latency_ms, + ), + extra={"serve_access_log": True}, + ) + self._metrics_manager.record_request_metrics( + route=http_route, + latency_ms=latency_ms, + was_error=user_exception is not None, + ) + + if user_exception is not None: + raise user_exception from None + + async def _call_user_generator( + self, + request_metadata: RequestMetadata, + request_args: Tuple[Any], + request_kwargs: Dict[str, Any], + status_code_callback: StatusCodeCallback, + ) -> AsyncGenerator[Any, None]: + """Calls a user method for a streaming call and yields its results. + + The user method is called in an asyncio `Task` and places its results on a + `result_queue`. This method pulls and yields from the `result_queue`. + """ + call_user_method_future = None + wait_for_message_task = None + try: + result_queue = MessageQueue() + + # `asyncio.Event`s are not thread safe, so `call_soon_threadsafe` must be + # used to interact with the result queue from the user callable thread. + def _enqueue_thread_safe(item: Any): + self._event_loop.call_soon_threadsafe(result_queue.put_nowait, item) + + call_user_method_future = asyncio.wrap_future( + self._user_callable_wrapper.call_user_method( + request_metadata, + request_args, + request_kwargs, + generator_result_callback=_enqueue_thread_safe, + ) + ) + + first_message_peeked = False + while True: + wait_for_message_task = self._event_loop.create_task( + result_queue.wait_for_message() + ) + done, _ = await asyncio.wait( + [call_user_method_future, wait_for_message_task], + return_when=asyncio.FIRST_COMPLETED, + ) + + # Consume and yield all available messages in the queue. + messages = result_queue.get_messages_nowait() + if messages: + # HTTP (ASGI) messages are only consumed by the proxy so batch them + # and use vanilla pickle (we know it's safe because these messages + # only contain primitive Python types). + if request_metadata.is_http_request: + # Peek the first ASGI message to determine the status code. 
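+                        # For reference (sketch): the first message of an ASGI
+                        # HTTP response is shaped like
+                        #   {"type": "http.response.start", "status": 200, "headers": [...]}
+                        # which is why peeking a single message suffices here.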
+ if not first_message_peeked: + msg = messages[0] + first_message_peeked = True + if msg["type"] == "http.response.start": + # HTTP responses begin with exactly one + # "http.response.start" message containing the "status" + # field. Other response types like WebSockets may not. + status_code_callback(str(msg["status"])) + + yield pickle.dumps(messages) + else: + for msg in messages: + yield msg + + # Exit once `call_user_method` has finished. In this case, all + # messages must have already been sent. + if call_user_method_future in done: + break + + e = call_user_method_future.exception() + if e is not None: + raise e from None + finally: + if ( + call_user_method_future is not None + and not call_user_method_future.done() + ): + call_user_method_future.cancel() + + if wait_for_message_task is not None and not wait_for_message_task.done(): + wait_for_message_task.cancel() + + async def handle_request( + self, request_metadata: RequestMetadata, *request_args, **request_kwargs + ) -> Tuple[bytes, Any]: + with self._wrap_user_method_call(request_metadata, request_args): + return await asyncio.wrap_future( + self._user_callable_wrapper.call_user_method( + request_metadata, request_args, request_kwargs + ) + ) + + async def handle_request_streaming( + self, request_metadata: RequestMetadata, *request_args, **request_kwargs + ) -> AsyncGenerator[Any, None]: + """Generator that is the entrypoint for all `stream=True` handle calls.""" + with self._wrap_user_method_call( + request_metadata, request_args + ) as status_code_callback: + async for result in self._call_user_generator( + request_metadata, + request_args, + request_kwargs, + status_code_callback=status_code_callback, + ): + yield result + + async def handle_request_with_rejection( + self, request_metadata: RequestMetadata, *request_args, **request_kwargs + ): + limit = self._deployment_config.max_ongoing_requests + num_ongoing_requests = self.get_num_ongoing_requests() + if num_ongoing_requests >= limit: + logger.warning( + f"Replica at capacity of max_ongoing_requests={limit}, " + f"rejecting request {request_metadata.request_id}.", + extra={"log_to_stderr": False}, + ) + yield ReplicaQueueLengthInfo( + accepted=False, num_ongoing_requests=num_ongoing_requests + ) + return + + with self._wrap_user_method_call( + request_metadata, request_args + ) as status_code_callback: + yield ReplicaQueueLengthInfo( + accepted=True, + # NOTE(edoakes): `_wrap_user_method_call` will increment the number + # of ongoing requests to include this one, so re-fetch the value. + num_ongoing_requests=self.get_num_ongoing_requests(), + ) + + if request_metadata.is_streaming: + async for result in self._call_user_generator( + request_metadata, + request_args, + request_kwargs, + status_code_callback=status_code_callback, + ): + yield result + else: + yield await asyncio.wrap_future( + self._user_callable_wrapper.call_user_method( + request_metadata, request_args, request_kwargs + ) + ) + + @abstractmethod + async def _on_initialized(self): + raise NotImplementedError + + async def initialize(self, deployment_config: DeploymentConfig): + try: + # Ensure that initialization is only performed once. + # When controller restarts, it will call this method again. 
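+            # The lock plus the `_user_callable_initialized` flag below make this
+            # method idempotent: a repeated call skips the constructor and goes
+            # straight to reconfiguration.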
+ async with self._user_callable_initialized_lock: + self._initialization_start_time = time.time() + if not self._user_callable_initialized: + self._user_callable_asgi_app = await asyncio.wrap_future( + self._user_callable_wrapper.initialize_callable() + ) + await self._on_initialized() + self._user_callable_initialized = True + + if deployment_config: + await asyncio.wrap_future( + self._user_callable_wrapper.set_sync_method_threadpool_limit( + deployment_config.max_ongoing_requests + ) + ) + await asyncio.wrap_future( + self._user_callable_wrapper.call_reconfigure( + deployment_config.user_config + ) + ) + + # A new replica should not be considered healthy until it passes + # an initial health check. If an initial health check fails, + # consider it an initialization failure. + await self.check_health() + except Exception: + raise RuntimeError(traceback.format_exc()) from None + + async def reconfigure(self, deployment_config: DeploymentConfig): + try: + user_config_changed = ( + deployment_config.user_config != self._deployment_config.user_config + ) + logging_config_changed = ( + deployment_config.logging_config + != self._deployment_config.logging_config + ) + self._deployment_config = deployment_config + self._version = DeploymentVersion.from_deployment_version( + self._version, deployment_config + ) + + self._metrics_manager.set_autoscaling_config( + deployment_config.autoscaling_config + ) + if logging_config_changed: + self._configure_logger_and_profilers(deployment_config.logging_config) + + await asyncio.wrap_future( + self._user_callable_wrapper.set_sync_method_threadpool_limit( + deployment_config.max_ongoing_requests + ) + ) + if user_config_changed: + await asyncio.wrap_future( + self._user_callable_wrapper.call_reconfigure( + deployment_config.user_config + ) + ) + + # We need to update internal replica context to reflect the new + # deployment_config. + self._set_internal_replica_context( + servable_object=self._user_callable_wrapper.user_callable + ) + except Exception: + raise RuntimeError(traceback.format_exc()) from None + + def get_metadata( + self, + ) -> Tuple[DeploymentConfig, DeploymentVersion, Optional[float], Optional[int]]: + return ( + self._version.deployment_config, + self._version, + self._initialization_latency, + self._port, + ) + + @abstractmethod + def _on_request_cancelled( + self, request_metadata: RequestMetadata, e: asyncio.CancelledError + ): + pass + + @abstractmethod + def _on_request_failed(self, request_metadata: RequestMetadata, e: Exception): + pass + + @abstractmethod + @contextmanager + def _wrap_user_method_call( + self, request_metadata: RequestMetadata, request_args: Tuple[Any] + ) -> Generator[StatusCodeCallback, None, None]: + pass + + async def _drain_ongoing_requests(self): + """Wait for any ongoing requests to finish. + + Sleep for a grace period before the first time we check the number of ongoing + requests to allow the notification to remove this replica to propagate to + callers first. + """ + wait_loop_period_s = self._deployment_config.graceful_shutdown_wait_loop_s + while True: + await asyncio.sleep(wait_loop_period_s) + + num_ongoing_requests = self._metrics_manager.get_num_ongoing_requests() + if num_ongoing_requests > 0: + logger.info( + f"Waiting for an additional {wait_loop_period_s}s to shut down " + f"because there are {num_ongoing_requests} ongoing requests." 
+ ) + else: + logger.info( + "Graceful shutdown complete; replica exiting.", + extra={"log_to_stderr": False}, + ) + break + + async def perform_graceful_shutdown(self): + self._shutting_down = True + + # If the replica was never initialized it never served traffic, so we + # can skip the wait period. + if self._user_callable_initialized: + await self._drain_ongoing_requests() + + try: + await asyncio.wrap_future(self._user_callable_wrapper.call_destructor()) + except: # noqa: E722 + # We catch a blanket exception since the constructor may still be + # running, so instance variables used by the destructor may not exist. + if self._user_callable_initialized: + logger.exception( + "__del__ ran before replica finished initializing, and " + "raised an exception." + ) + else: + logger.exception("__del__ raised an exception.") + + await self._metrics_manager.shutdown() + + async def check_health(self): + # If there's no user-defined health check, nothing runs on the user code event + # loop and no future is returned. + f: Optional[ + concurrent.futures.Future + ] = self._user_callable_wrapper.call_user_health_check() + if f is not None: + await asyncio.wrap_future(f) + + +class Replica(ReplicaBase): + async def _on_initialized(self): + self._set_internal_replica_context( + servable_object=self._user_callable_wrapper.user_callable + ) + + # Save the initialization latency if the replica is initializing + # for the first time. + if self._initialization_latency is None: + self._initialization_latency = time.time() - self._initialization_start_time + + def _on_request_cancelled( + self, request_metadata: RequestMetadata, e: asyncio.CancelledError + ): + """Recursively cancels child requests.""" + requests_pending_assignment = ( + ray.serve.context._get_requests_pending_assignment( + request_metadata.internal_request_id + ) + ) + for task in requests_pending_assignment.values(): + task.cancel() + + def _on_request_failed(self, request_metadata: RequestMetadata, e: Exception): + if ray.util.pdb._is_ray_debugger_post_mortem_enabled(): + ray.util.pdb._post_mortem() + + @contextmanager + def _wrap_user_method_call( + self, request_metadata: RequestMetadata, request_args: Tuple[Any] + ) -> Generator[StatusCodeCallback, None, None]: + """Context manager that wraps user method calls. + + 1) Sets the request context var with appropriate metadata. + 2) Records the access log message (if not disabled). + 3) Records per-request metrics via the metrics manager. + """ + request_metadata.route = self._maybe_get_http_route( + request_metadata, request_args + ) + ray.serve.context._serve_request_context.set( + ray.serve.context._RequestContext( + route=request_metadata.route, + request_id=request_metadata.request_id, + _internal_request_id=request_metadata.internal_request_id, + app_name=self._deployment_id.app_name, + multiplexed_model_id=request_metadata.multiplexed_model_id, + grpc_context=request_metadata.grpc_context, + ) + ) + + with self._handle_errors_and_metrics( + request_metadata, request_args + ) as status_code_callback: + yield status_code_callback + + +class ReplicaActor: + """Actor definition for replicas of Ray Serve deployments. + + This class defines the interface that the controller and deployment handles + (i.e., from proxies and other replicas) use to interact with a replica. + + All interaction with the user-provided callable is done via the + `UserCallableWrapper` class. 
+ """ + + async def __init__( + self, + replica_id: ReplicaID, + serialized_deployment_def: bytes, + serialized_init_args: bytes, + serialized_init_kwargs: bytes, + deployment_config_proto_bytes: bytes, + version: DeploymentVersion, + ): + deployment_config = DeploymentConfig.from_proto_bytes( + deployment_config_proto_bytes + ) + deployment_def = cloudpickle.loads(serialized_deployment_def) + if isinstance(deployment_def, str): + deployment_def = _load_deployment_def_from_import_path(deployment_def) + + self._replica_impl: ReplicaBase = create_replica_impl( + replica_id=replica_id, + deployment_def=deployment_def, + init_args=cloudpickle.loads(serialized_init_args), + init_kwargs=cloudpickle.loads(serialized_init_kwargs), + deployment_config=deployment_config, + version=version, + ) + + def push_proxy_handle(self, handle: ActorHandle): + # NOTE(edoakes): it's important to call a method on the proxy handle to + # initialize its state in the C++ core worker. + handle.pong.remote() + + def get_num_ongoing_requests(self) -> int: + """Fetch the number of ongoing requests at this replica (queue length). + + This runs on a separate thread (using a Ray concurrency group) so it will + not be blocked by user code. + """ + return self._replica_impl.get_num_ongoing_requests() + + async def is_allocated(self) -> str: + """poke the replica to check whether it's alive. + + When calling this method on an ActorHandle, it will complete as + soon as the actor has started running. We use this mechanism to + detect when a replica has been allocated a worker slot. + At this time, the replica can transition from PENDING_ALLOCATION + to PENDING_INITIALIZATION startup state. + + Returns: + The PID, actor ID, node ID, node IP, and log filepath id of the replica. + """ + + return ( + os.getpid(), + ray.get_runtime_context().get_actor_id(), + ray.get_runtime_context().get_worker_id(), + ray.get_runtime_context().get_node_id(), + ray.util.get_node_ip_address(), + get_component_logger_file_path(), + ) + + async def initialize_and_get_metadata( + self, deployment_config: DeploymentConfig = None, _after: Optional[Any] = None + ): + """Handles initializing the replica. + + Returns: 3-tuple containing + 1. DeploymentConfig of the replica + 2. DeploymentVersion of the replica + 3. Initialization duration in seconds + """ + # Unused `_after` argument is for scheduling: passing an ObjectRef + # allows delaying this call until after the `_after` call has returned. 
+ await self._replica_impl.initialize(deployment_config) + return self._replica_impl.get_metadata() + + async def check_health(self): + await self._replica_impl.check_health() + + async def reconfigure( + self, deployment_config + ) -> Tuple[DeploymentConfig, DeploymentVersion, Optional[float], Optional[int]]: + await self._replica_impl.reconfigure(deployment_config) + return self._replica_impl.get_metadata() + + def _preprocess_request_args( + self, + pickled_request_metadata: bytes, + request_args: Tuple[Any], + ) -> Tuple[RequestMetadata, Tuple[Any]]: + request_metadata = pickle.loads(pickled_request_metadata) + if request_metadata.is_http_request or request_metadata.is_grpc_request: + request_args = (pickle.loads(request_args[0]),) + + return request_metadata, request_args + + async def handle_request( + self, + pickled_request_metadata: bytes, + *request_args, + **request_kwargs, + ) -> Tuple[bytes, Any]: + """Entrypoint for `stream=False` calls.""" + request_metadata, request_args = self._preprocess_request_args( + pickled_request_metadata, request_args + ) + result = await self._replica_impl.handle_request( + request_metadata, *request_args, **request_kwargs + ) + if request_metadata.is_grpc_request: + result = (request_metadata.grpc_context, result.SerializeToString()) + + return result + + async def handle_request_streaming( + self, + pickled_request_metadata: bytes, + *request_args, + **request_kwargs, + ) -> AsyncGenerator[Any, None]: + """Generator that is the entrypoint for all `stream=True` handle calls.""" + request_metadata, request_args = self._preprocess_request_args( + pickled_request_metadata, request_args + ) + async for result in self._replica_impl.handle_request_streaming( + request_metadata, *request_args, **request_kwargs + ): + if request_metadata.is_grpc_request: + result = (request_metadata.grpc_context, result.SerializeToString()) + + yield result + + async def handle_request_with_rejection( + self, + pickled_request_metadata: bytes, + *request_args, + **request_kwargs, + ) -> AsyncGenerator[Any, None]: + """Entrypoint for all requests with strict max_ongoing_requests enforcement. + + The first response from this generator is always a system message indicating + if the request was accepted (the replica has capacity for the request) or + rejected (the replica is already at max_ongoing_requests). + + For non-streaming requests, there will only be one more message, the unary + result of the user request handler. + + For streaming requests, the subsequent messages will be the results of the + user request handler (which must be a generator). 
+ """ + request_metadata, request_args = self._preprocess_request_args( + pickled_request_metadata, request_args + ) + async for result in self._replica_impl.handle_request_with_rejection( + request_metadata, *request_args, **request_kwargs + ): + if isinstance(result, ReplicaQueueLengthInfo): + yield pickle.dumps(result) + else: + if request_metadata.is_grpc_request: + result = (request_metadata.grpc_context, result.SerializeToString()) + + yield result + + async def handle_request_from_java( + self, + proto_request_metadata: bytes, + *request_args, + **request_kwargs, + ) -> Any: + from ray.serve.generated.serve_pb2 import ( + RequestMetadata as RequestMetadataProto, + ) + + proto = RequestMetadataProto.FromString(proto_request_metadata) + request_metadata: RequestMetadata = RequestMetadata( + request_id=proto.request_id, + internal_request_id=proto.internal_request_id, + call_method=proto.call_method, + multiplexed_model_id=proto.multiplexed_model_id, + route=proto.route, + ) + return await self._replica_impl.handle_request( + request_metadata, *request_args, **request_kwargs + ) + + async def perform_graceful_shutdown(self): + await self._replica_impl.perform_graceful_shutdown() + + def _save_cpu_profile_data(self) -> str: + """Saves CPU profiling data, if CPU profiling is enabled. + + Logs a warning if CPU profiling is disabled. + """ + + if self.cpu_profiler is not None: + import marshal + + self.cpu_profiler.snapshot_stats() + with open(self.cpu_profiler_log, "wb") as f: + marshal.dump(self.cpu_profiler.stats, f) + logger.info(f'Saved CPU profile data to file "{self.cpu_profiler_log}"') + return self.cpu_profiler_log + else: + logger.error( + "Attempted to save CPU profile data, but failed because no " + "CPU profiler was running! Enable CPU profiling by enabling " + "the RAY_SERVE_ENABLE_CPU_PROFILING env var." + ) + + +@dataclass +class UserMethodInfo: + """Wrapper for a user method and its relevant metadata.""" + + callable: Callable + name: str + is_asgi_app: bool + takes_any_args: bool + takes_grpc_context_kwarg: bool + + @classmethod + def from_callable(cls, c: Callable, *, is_asgi_app: bool) -> "UserMethodInfo": + params = inspect.signature(c).parameters + return cls( + callable=c, + name=c.__name__, + is_asgi_app=is_asgi_app, + takes_any_args=len(params) > 0, + takes_grpc_context_kwarg=GRPC_CONTEXT_ARG_NAME in params, + ) + + +class UserCallableWrapper: + """Wraps a user-provided callable that is used to handle requests to a replica.""" + + def __init__( + self, + deployment_def: Callable, + init_args: Tuple, + init_kwargs: Dict, + *, + deployment_id: DeploymentID, + run_sync_methods_in_threadpool: bool, + ): + if not (inspect.isfunction(deployment_def) or inspect.isclass(deployment_def)): + raise TypeError( + "deployment_def must be a function or class. Instead, its type was " + f"{type(deployment_def)}." + ) + + self._deployment_def = deployment_def + self._init_args = init_args + self._init_kwargs = init_kwargs + self._is_function = inspect.isfunction(deployment_def) + self._deployment_id = deployment_id + self._destructor_called = False + self._run_sync_methods_in_threadpool = run_sync_methods_in_threadpool + self._warned_about_sync_method_change = False + self._cached_user_method_info: Dict[str, UserMethodInfo] = {} + + # Will be populated in `initialize_callable`. + self._callable = None + + # All interactions with user code run on this loop to avoid blocking the + # replica's main event loop. 
+ self._user_code_event_loop: asyncio.AbstractEventLoop = asyncio.new_event_loop() + + def _run_user_code_event_loop(): + # Required so that calls to get the current running event loop work + # properly in user code. + asyncio.set_event_loop(self._user_code_event_loop) + self._user_code_event_loop.run_forever() + + self._user_code_event_loop_thread = threading.Thread( + daemon=True, + target=_run_user_code_event_loop, + ) + self._user_code_event_loop_thread.start() + + def _run_on_user_code_event_loop(f: Callable) -> Callable: + """Decorator to run a coroutine method on the user code event loop. + + The method will be modified to be a sync function that returns a + `concurrent.futures.Future`. + """ + assert inspect.iscoroutinefunction( + f + ), "_run_on_user_code_event_loop can only be used on coroutine functions." + + @functools.wraps(f) + def wrapper(self, *args, **kwargs) -> concurrent.futures.Future: + return asyncio.run_coroutine_threadsafe( + f(self, *args, **kwargs), + self._user_code_event_loop, + ) + + return wrapper + + @_run_on_user_code_event_loop + async def set_sync_method_threadpool_limit(self, limit: int): + # NOTE(edoakes): the limit is thread local, so this must + # be run on the user code event loop. + to_thread.current_default_thread_limiter().total_tokens = limit + + def _get_user_method_info(self, method_name: str) -> UserMethodInfo: + """Get UserMethodInfo for the provided call method name. + + This method is cached to avoid repeated expensive calls to `inspect.signature`. + """ + if method_name in self._cached_user_method_info: + return self._cached_user_method_info[method_name] + + if self._is_function: + user_method = self._callable + elif hasattr(self._callable, method_name): + user_method = getattr(self._callable, method_name) + else: + # Filter to methods that don't start with '__' prefix. + def callable_method_filter(attr): + if attr.startswith("__"): + return False + elif not callable(getattr(self._callable, attr)): + return False + + return True + + methods = list(filter(callable_method_filter, dir(self._callable))) + raise RayServeException( + f"Tried to call a method '{method_name}' " + "that does not exist. Available methods: " + f"{methods}." + ) + + info = UserMethodInfo.from_callable( + user_method, + is_asgi_app=isinstance(self._callable, ASGIAppReplicaWrapper), + ) + self._cached_user_method_info[method_name] = info + return info + + async def _send_user_result_over_asgi( + self, + result: Any, + asgi_args: ASGIArgs, + ): + """Handle the result from user code and send it over the ASGI interface. + + If the result is already a Response type, it is sent directly. Otherwise, it + is converted to a custom Response type that handles serialization for + common Python objects. + """ + scope, receive, send = asgi_args.to_args_tuple() + if isinstance(result, starlette.responses.Response): + await result(scope, receive, send) + else: + await Response(result).send(scope, receive, send) + + async def _call_func_or_gen( + self, + callable: Callable, + *, + args: Optional[Tuple[Any]] = None, + kwargs: Optional[Dict[str, Any]] = None, + request_metadata: Optional[RequestMetadata] = None, + generator_result_callback: Optional[Callable] = None, + run_sync_methods_in_threadpool_override: Optional[bool] = None, + ) -> Tuple[Any, bool]: + """Call the callable with the provided arguments. + + This is a convenience wrapper that will work for `def`, `async def`, + generator, and async generator functions. 
+ + Returns the result and a boolean indicating if the result was a sync generator + that has already been consumed. + """ + sync_gen_consumed = False + args = args if args is not None else tuple() + kwargs = kwargs if kwargs is not None else dict() + run_sync_in_threadpool = ( + self._run_sync_methods_in_threadpool + if run_sync_methods_in_threadpool_override is None + else run_sync_methods_in_threadpool_override + ) + is_sync_method = ( + inspect.isfunction(callable) or inspect.ismethod(callable) + ) and not ( + inspect.iscoroutinefunction(callable) + or inspect.isasyncgenfunction(callable) + ) + + if is_sync_method and run_sync_in_threadpool: + is_generator = inspect.isgeneratorfunction(callable) + if is_generator: + sync_gen_consumed = True + if request_metadata and not request_metadata.is_streaming: + # TODO(edoakes): make this check less redundant with the one in + # _handle_user_method_result. + raise TypeError( + f"Method '{callable.__name__}' returned a generator. " + "You must use `handle.options(stream=True)` to call " + "generators on a deployment." + ) + + def run_callable(): + result = callable(*args, **kwargs) + if is_generator: + for r in result: + generator_result_callback(r) + + result = None + + return result + + # NOTE(edoakes): we use anyio.to_thread here because it's what Starlette + # uses (and therefore FastAPI too). The max size of the threadpool is + # set to max_ongoing_requests in the replica wrapper. + # anyio.to_thread propagates ContextVars to the worker thread automatically. + result = await to_thread.run_sync(run_callable) + else: + if ( + is_sync_method + and not self._warned_about_sync_method_change + and run_sync_methods_in_threadpool_override is None + ): + self._warned_about_sync_method_change = True + warnings.warn( + RAY_SERVE_RUN_SYNC_IN_THREADPOOL_WARNING.format( + method_name=callable.__name__, + ) + ) + + result = callable(*args, **kwargs) + if inspect.iscoroutine(result): + result = await result + + return result, sync_gen_consumed + + @property + def user_callable(self) -> Optional[Callable]: + return self._callable + + @_run_on_user_code_event_loop + async def initialize_callable(self) -> Optional[ASGIApp]: + """Initialize the user callable. + + If the callable is an ASGI app wrapper (e.g., using @serve.ingress), returns + the ASGI app object, which may be used *read only* by the caller. + """ + if self._callable is not None: + raise RuntimeError("initialize_callable should only be called once.") + + # This closure initializes user code and finalizes replica + # startup. By splitting the initialization step like this, + # we can already access this actor before the user code + # has finished initializing. + # The supervising state manager can then wait + # for allocation of this replica by using the `is_allocated` + # method. After that, it calls `reconfigure` to trigger + # user code initialization. + logger.info( + "Started initializing replica.", + extra={"log_to_stderr": False}, + ) + + if self._is_function: + self._callable = self._deployment_def + else: + # This allows deployments to define an async __init__ + # method (mostly used for testing). + self._callable = self._deployment_def.__new__(self._deployment_def) + await self._call_func_or_gen( + self._callable.__init__, + args=self._init_args, + kwargs=self._init_kwargs, + # Always run the constructor on the main user code thread. 
+                run_sync_methods_in_threadpool_override=False,
+            )
+
+        if isinstance(self._callable, ASGIAppReplicaWrapper):
+            await self._callable._run_asgi_lifespan_startup()
+
+        self._user_health_check = getattr(self._callable, HEALTH_CHECK_METHOD, None)
+
+        logger.info(
+            "Finished initializing replica.",
+            extra={"log_to_stderr": False},
+        )
+
+        return (
+            self._callable.app
+            if isinstance(self._callable, ASGIAppReplicaWrapper)
+            else None
+        )
+
+    def _raise_if_not_initialized(self, method_name: str):
+        if self._callable is None:
+            raise RuntimeError(
+                f"`initialize_callable` must be called before `{method_name}`."
+            )
+
+    def call_user_health_check(self) -> Optional[concurrent.futures.Future]:
+        self._raise_if_not_initialized("call_user_health_check")
+
+        # If the user provided a health check, call it on the user code thread. If user
+        # code blocks the event loop the health check may time out.
+        #
+        # To avoid this issue for basic cases without a user-defined health check, skip
+        # interacting with the user callable entirely.
+        if self._user_health_check is not None:
+            return self._call_user_health_check()
+
+        return None
+
+    @_run_on_user_code_event_loop
+    async def _call_user_health_check(self):
+        await self._call_func_or_gen(self._user_health_check)
+
+    @_run_on_user_code_event_loop
+    async def call_reconfigure(self, user_config: Any):
+        self._raise_if_not_initialized("call_reconfigure")
+
+        # NOTE(edoakes): there is the possibility of a race condition in user code if
+        # they don't have any form of concurrency control between `reconfigure` and
+        # other methods. See https://github.com/ray-project/ray/pull/42159.
+        if user_config is not None:
+            if self._is_function:
+                raise ValueError("deployment_def must be a class to use user_config")
+            elif not hasattr(self._callable, RECONFIGURE_METHOD):
+                raise RayServeException(
+                    f"user_config specified but deployment {self._deployment_id} "
+                    f"is missing a {RECONFIGURE_METHOD} method."
+                )
+            await self._call_func_or_gen(
+                getattr(self._callable, RECONFIGURE_METHOD),
+                args=(user_config,),
+            )
+
+    def _prepare_args_for_http_request(
+        self,
+        request: StreamingHTTPRequest,
+        request_metadata: RequestMetadata,
+        user_method_info: UserMethodInfo,
+        *,
+        generator_result_callback: Optional[Callable] = None,
+    ) -> Tuple[Tuple[Any], ASGIArgs, asyncio.Task]:
+        """Prepare arguments for a user method handling an HTTP request.
+
+        Returns (request_args, asgi_args, receive_task).
+
+        The returned `receive_task` should be cancelled when the user method exits.
+        """
+        scope = request.asgi_scope
+        receive = ASGIReceiveProxy(
+            scope,
+            request_metadata,
+            request.receive_asgi_messages,
+        )
+        receive_task = self._user_code_event_loop.create_task(
+            receive.fetch_until_disconnect()
+        )
+
+        async def _send(message: Message):
+            return generator_result_callback(message)
+
+        asgi_args = ASGIArgs(
+            scope=scope,
+            receive=receive,
+            send=_send,
+        )
+        if user_method_info.is_asgi_app:
+            request_args = asgi_args.to_args_tuple()
+        elif not user_method_info.takes_any_args:
+            # Edge case to support empty HTTP handlers: don't pass the Request
+            # argument if the callable has no parameters.
+            request_args = tuple()
+        else:
+            # Non-FastAPI HTTP handlers take only the starlette `Request`.
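+            # Illustrative handler of this shape (sketch only; names are
+            # assumptions):
+            #
+            #   @serve.deployment
+            #   class Echo:
+            #       async def __call__(self, request: starlette.requests.Request):
+            #           return await request.body()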
+ request_args = (asgi_args.to_starlette_request(),) + + return request_args, asgi_args, receive_task + + def _prepare_args_for_grpc_request( + self, + request: gRPCRequest, + request_metadata: RequestMetadata, + user_method_info: UserMethodInfo, + ) -> Tuple[Tuple[Any], Dict[str, Any]]: + """Prepare args and kwargs for a user method handling a gRPC request. + + The sole argument is always the user request proto. + + If the method has a "context" kwarg, we pass the gRPC context, else no kwargs. + """ + request_kwargs = ( + {GRPC_CONTEXT_ARG_NAME: request_metadata.grpc_context} + if user_method_info.takes_grpc_context_kwarg + else {} + ) + return (request.user_request_proto,), request_kwargs + + async def _handle_user_method_result( + self, + result: Any, + request_metadata: RequestMetadata, + user_method_info: UserMethodInfo, + *, + sync_gen_consumed: bool, + generator_result_callback: Optional[Callable], + asgi_args: Optional[ASGIArgs], + ) -> Any: + """Postprocess the result of a user method. + + User methods can be regular unary functions or return a sync or async generator. + This method will raise an exception if the result is not of the expected type + (e.g., non-generator for streaming requests or generator for unary requests). + + Generator outputs will be written to the `generator_result_callback`. + + Note that HTTP requests are an exception: they are *always* streaming requests, + but for ASGI apps (like FastAPI), the actual method will be a regular function + implementing the ASGI `__call__` protocol. + """ + result_is_gen = inspect.isgenerator(result) + result_is_async_gen = inspect.isasyncgen(result) + if request_metadata.is_streaming: + if result_is_gen: + for r in result: + generator_result_callback(r) + elif result_is_async_gen: + async for r in result: + generator_result_callback(r) + elif request_metadata.is_http_request and not user_method_info.is_asgi_app: + # For the FastAPI codepath, the response has already been sent over + # ASGI, but for the vanilla deployment codepath we need to send it. + await self._send_user_result_over_asgi(result, asgi_args) + elif not request_metadata.is_http_request and not sync_gen_consumed: + # If a unary method is called with stream=True for anything EXCEPT + # an HTTP request, raise an error. + # HTTP requests are always streaming regardless of if the method + # returns a generator, because it's provided the result queue as its + # ASGI `send` interface to stream back results. + raise TypeError( + f"Called method '{user_method_info.name}' with " + "`handle.options(stream=True)` but it did not return a " + "generator." + ) + else: + assert ( + not request_metadata.is_http_request + ), "All HTTP requests go through the streaming codepath." + + if result_is_gen or result_is_async_gen: + raise TypeError( + f"Method '{user_method_info.name}' returned a generator. " + "You must use `handle.options(stream=True)` to call " + "generators on a deployment." + ) + + return result + + @_run_on_user_code_event_loop + async def call_user_method( + self, + request_metadata: RequestMetadata, + request_args: Tuple[Any], + request_kwargs: Dict[str, Any], + *, + generator_result_callback: Optional[Callable] = None, + ) -> Any: + """Call a user method (unary or generator). + + The `generator_result_callback` is used to communicate the results of generator + methods. + + Raises any exception raised by the user code so it can be propagated as a + `RayTaskError`. 
+ """ + self._raise_if_not_initialized("call_user_method") + + logger.info( + f"Started executing request to method '{request_metadata.call_method}'.", + extra={"log_to_stderr": False, "serve_access_log": True}, + ) + + result = None + asgi_args = None + receive_task = None + user_method_info = None + try: + user_method_info = self._get_user_method_info(request_metadata.call_method) + if request_metadata.is_http_request: + assert len(request_args) == 1 and isinstance( + request_args[0], StreamingHTTPRequest + ) + ( + request_args, + asgi_args, + receive_task, + ) = self._prepare_args_for_http_request( + request_args[0], + request_metadata, + user_method_info, + generator_result_callback=generator_result_callback, + ) + elif request_metadata.is_grpc_request: + assert len(request_args) == 1 and isinstance( + request_args[0], gRPCRequest + ) + request_args, request_kwargs = self._prepare_args_for_grpc_request( + request_args[0], request_metadata, user_method_info + ) + + result, sync_gen_consumed = await self._call_func_or_gen( + user_method_info.callable, + args=request_args, + kwargs=request_kwargs, + request_metadata=request_metadata, + generator_result_callback=generator_result_callback + if request_metadata.is_streaming + else None, + ) + final_result = await self._handle_user_method_result( + result, + request_metadata, + user_method_info, + sync_gen_consumed=sync_gen_consumed, + generator_result_callback=generator_result_callback, + asgi_args=asgi_args, + ) + + if receive_task is not None and not receive_task.done(): + receive_task.cancel() + + return final_result + except Exception: + if ( + request_metadata.is_http_request + and asgi_args is not None + and user_method_info is not None + # If the callable is an ASGI app, it already sent a 500 status response. + and not user_method_info.is_asgi_app + ): + await self._send_user_result_over_asgi( + starlette.responses.Response( + "Internal Server Error", status_code=500 + ), + asgi_args, + ) + + if receive_task is not None and not receive_task.done(): + receive_task.cancel() + + raise + except asyncio.CancelledError: + user_method_info = self._get_user_method_info(request_metadata.call_method) + if receive_task is not None and not receive_task.done(): + # Do NOT cancel the receive task if the request has been + # cancelled, but the call is a batched call. This is + # because we cannot guarantee cancelling the batched + # call, so in the case that the call continues executing + # we should continue fetching data from the client. + if not hasattr(user_method_info.callable, "set_max_batch_size"): + receive_task.cancel() + + raise + + @_run_on_user_code_event_loop + async def call_destructor(self): + """Explicitly call the `__del__` method of the user callable. + + Calling this multiple times has no effect; only the first call will + actually call the destructor. + """ + if self._callable is None: + logger.info( + "This replica has not yet started running user code. " + "Skipping __del__." + ) + return + + # Only run the destructor once. This is safe because there is no `await` between + # checking the flag here and flipping it to `True` below. + if self._destructor_called: + return + + self._destructor_called = True + try: + if hasattr(self._callable, "__del__"): + # Make sure to accept `async def __del__(self)` as well. + await self._call_func_or_gen( + self._callable.__del__, + # Always run the destructor on the main user callable thread. 
+ run_sync_methods_in_threadpool_override=False, + ) + + if hasattr(self._callable, "__serve_multiplex_wrapper"): + await getattr(self._callable, "__serve_multiplex_wrapper").shutdown() + + except Exception as e: + logger.exception(f"Exception during graceful shutdown of replica: {e}") diff --git a/.venv/lib/python3.11/site-packages/ray/serve/_private/usage.py b/.venv/lib/python3.11/site-packages/ray/serve/_private/usage.py new file mode 100644 index 0000000000000000000000000000000000000000..caadb9a4ecf758a521983352299504b98651f901 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/_private/usage.py @@ -0,0 +1,49 @@ +from enum import Enum +from typing import Dict, Optional + +from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag + + +class ServeUsageTag(Enum): + API_VERSION = TagKey.SERVE_API_VERSION + NUM_DEPLOYMENTS = TagKey.SERVE_NUM_DEPLOYMENTS + GCS_STORAGE = TagKey.GCS_STORAGE + NUM_GPU_DEPLOYMENTS = TagKey.SERVE_NUM_GPU_DEPLOYMENTS + FASTAPI_USED = TagKey.SERVE_FASTAPI_USED + DAG_DRIVER_USED = TagKey.SERVE_DAG_DRIVER_USED + HTTP_ADAPTER_USED = TagKey.SERVE_HTTP_ADAPTER_USED + GRPC_INGRESS_USED = TagKey.SERVE_GRPC_INGRESS_USED + REST_API_VERSION = TagKey.SERVE_REST_API_VERSION + NUM_APPS = TagKey.SERVE_NUM_APPS + NUM_REPLICAS_LIGHTWEIGHT_UPDATED = TagKey.SERVE_NUM_REPLICAS_LIGHTWEIGHT_UPDATED + USER_CONFIG_LIGHTWEIGHT_UPDATED = TagKey.SERVE_USER_CONFIG_LIGHTWEIGHT_UPDATED + AUTOSCALING_CONFIG_LIGHTWEIGHT_UPDATED = ( + TagKey.SERVE_AUTOSCALING_CONFIG_LIGHTWEIGHT_UPDATED + ) + DEPLOYMENT_HANDLE_API_USED = TagKey.SERVE_DEPLOYMENT_HANDLE_API_USED + DEPLOYMENT_HANDLE_TO_OBJECT_REF_API_USED = ( + TagKey.SERVE_DEPLOYMENT_HANDLE_TO_OBJECT_REF_API_USED + ) + MULTIPLEXED_API_USED = TagKey.SERVE_MULTIPLEXED_API_USED + HTTP_PROXY_USED = TagKey.SERVE_HTTP_PROXY_USED + GRPC_PROXY_USED = TagKey.SERVE_GRPC_PROXY_USED + SERVE_STATUS_API_USED = TagKey.SERVE_STATUS_API_USED + SERVE_GET_APP_HANDLE_API_USED = TagKey.SERVE_GET_APP_HANDLE_API_USED + SERVE_GET_DEPLOYMENT_HANDLE_API_USED = TagKey.SERVE_GET_DEPLOYMENT_HANDLE_API_USED + APP_CONTAINER_RUNTIME_ENV_USED = TagKey.SERVE_APP_CONTAINER_RUNTIME_ENV_USED + DEPLOYMENT_CONTAINER_RUNTIME_ENV_USED = ( + TagKey.SERVE_DEPLOYMENT_CONTAINER_RUNTIME_ENV_USED + ) + NUM_NODE_COMPACTIONS = TagKey.SERVE_NUM_NODE_COMPACTIONS + AUTO_NUM_REPLICAS_USED = TagKey.SERVE_AUTO_NUM_REPLICAS_USED + + def record(self, value: str): + """Record telemetry value.""" + record_extra_usage_tag(self.value, value) + + def get_value_from_report(self, report: Dict) -> Optional[str]: + """Returns `None` if the tag isn't in the report.""" + if "extra_usage_tags" not in report: + return None + + return report["extra_usage_tags"].get(TagKey.Name(self.value).lower(), None) diff --git a/.venv/lib/python3.11/site-packages/ray/serve/api.py b/.venv/lib/python3.11/site-packages/ray/serve/api.py new file mode 100644 index 0000000000000000000000000000000000000000..ca68fbc1d7ab248531d2333e392704b08a6bb86e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/api.py @@ -0,0 +1,891 @@ +import collections +import inspect +import logging +import time +from functools import wraps +from typing import Any, Callable, Dict, List, Optional, Union + +from fastapi import APIRouter, FastAPI + +import ray +from ray import cloudpickle +from ray._private.serialization import pickle_dumps +from ray.serve._private.build_app import build_app +from ray.serve._private.config import ( + DeploymentConfig, + ReplicaConfig, + handle_num_replicas_auto, +) +from 
ray.serve._private.constants import ( + RAY_SERVE_FORCE_LOCAL_TESTING_MODE, + SERVE_DEFAULT_APP_NAME, + SERVE_LOGGER_NAME, +) +from ray.serve._private.http_util import ( + ASGIAppReplicaWrapper, + make_fastapi_class_based_view, +) +from ray.serve._private.local_testing_mode import make_local_deployment_handle +from ray.serve._private.logging_utils import configure_component_logger +from ray.serve._private.usage import ServeUsageTag +from ray.serve._private.utils import ( + DEFAULT, + Default, + ensure_serialization_context, + extract_self_if_method_call, + validate_route_prefix, +) +from ray.serve.config import ( + AutoscalingConfig, + DeploymentMode, + HTTPOptions, + ProxyLocation, + gRPCOptions, +) +from ray.serve.context import ( + ReplicaContext, + _get_global_client, + _get_internal_replica_context, + _set_global_client, +) +from ray.serve.deployment import Application, Deployment +from ray.serve.exceptions import RayServeException +from ray.serve.handle import DeploymentHandle +from ray.serve.multiplex import _ModelMultiplexWrapper +from ray.serve.schema import LoggingConfig, ServeInstanceDetails, ServeStatus +from ray.util.annotations import DeveloperAPI, PublicAPI + +from ray.serve._private import api as _private_api # isort:skip + +logger = logging.getLogger(SERVE_LOGGER_NAME) + + +@PublicAPI(stability="stable") +def start( + proxy_location: Union[None, str, ProxyLocation] = None, + http_options: Union[None, dict, HTTPOptions] = None, + grpc_options: Union[None, dict, gRPCOptions] = None, + logging_config: Union[None, dict, LoggingConfig] = None, + **kwargs, +): + """Start Serve on the cluster. + + Used to set cluster-scoped configurations such as HTTP options. In most cases, this + does not need to be called manually and Serve will be started when an application is + first deployed to the cluster. + + These cluster-scoped options cannot be updated dynamically. To update them, start a + new cluster or shut down Serve on the cluster and start it again. + + These options can also be set in the config file deployed via REST API. + + Args: + proxy_location: Where to run proxies that handle ingress traffic to the + cluster (defaults to every node in the cluster with at least one replica on + it). See `ProxyLocation` for supported options. + http_options: HTTP config options for the proxies. These can be passed as an + unstructured dictionary or the structured `HTTPOptions` class. See + `HTTPOptions` for supported options. + grpc_options: [EXPERIMENTAL] gRPC config options for the proxies. These can + be passed as an unstructured dictionary or the structured `gRPCOptions` + class See `gRPCOptions` for supported options. + logging_config: logging config options for the serve component ( + controller & proxy). + """ + if proxy_location is None: + if http_options is None: + http_options = HTTPOptions(location=DeploymentMode.EveryNode) + else: + if http_options is None: + http_options = HTTPOptions() + elif isinstance(http_options, dict): + http_options = HTTPOptions(**http_options) + + if isinstance(proxy_location, str): + proxy_location = ProxyLocation(proxy_location) + + http_options.location = ProxyLocation._to_deployment_mode(proxy_location) + + _private_api.serve_start( + http_options=http_options, + grpc_options=grpc_options, + global_logging_config=logging_config, + **kwargs, + ) + + +@PublicAPI(stability="stable") +def shutdown(): + """Completely shut down Serve on the cluster. + + Deletes all applications and shuts down Serve system actors. 
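+
+    Example (illustrative):
+
+    .. code-block:: python
+
+        from ray import serve
+
+        serve.run(MyDeployment.bind())
+        ...
+        # Tear everything down. Safe to call even if Serve isn't running,
+        # in which case it just logs and returns.
+        serve.shutdown()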
+ """ + + try: + client = _get_global_client() + except RayServeException: + logger.info( + "Nothing to shut down. There's no Serve application " + "running on this Ray cluster." + ) + return + + client.shutdown() + _set_global_client(None) + + +@DeveloperAPI +def get_replica_context() -> ReplicaContext: + """Returns the deployment and replica tag from within a replica at runtime. + + A replica tag uniquely identifies a single replica for a Ray Serve + deployment. + + Raises: + RayServeException: if not called from within a Ray Serve deployment. + + Example: + + .. code-block:: python + + from ray import serve + @serve.deployment + class MyDeployment: + def __init__(self): + # Prints "MyDeployment" + print(serve.get_replica_context().deployment) + + """ + internal_replica_context = _get_internal_replica_context() + if internal_replica_context is None: + raise RayServeException( + "`serve.get_replica_context()` " + "may only be called from within a " + "Ray Serve deployment." + ) + return internal_replica_context + + +@PublicAPI(stability="stable") +def ingress(app: Union["FastAPI", "APIRouter", Callable]) -> Callable: + """Wrap a deployment class with a FastAPI application for HTTP request parsing. + + Example: + + .. code-block:: python + + from ray import serve + from fastapi import FastAPI + + app = FastAPI() + + @serve.deployment + @serve.ingress(app) + class MyFastAPIDeployment: + @app.get("/hi") + def say_hi(self) -> str: + return "Hello world!" + + app = MyFastAPIDeployment.bind() + + Args: + app: the FastAPI app or router object to wrap this class with. + Can be any ASGI-compatible callable. + """ + + def decorator(cls): + if not inspect.isclass(cls): + raise ValueError("@serve.ingress must be used with a class.") + + if issubclass(cls, collections.abc.Callable): + raise ValueError( + "Classes passed to @serve.ingress may not have __call__ method." + ) + + # Sometimes there are decorators on the methods. We want to fix + # the fast api routes here. + if isinstance(app, (FastAPI, APIRouter)): + make_fastapi_class_based_view(app, cls) + + # Free the state of the app so subsequent modification won't affect + # this ingress deployment. We don't use copy.copy here to avoid + # recursion issue. + ensure_serialization_context() + frozen_app = cloudpickle.loads( + pickle_dumps(app, error_msg="Failed to serialize the FastAPI app.") + ) + + class ASGIIngressWrapper(cls, ASGIAppReplicaWrapper): + def __init__(self, *args, **kwargs): + # Call user-defined constructor. + cls.__init__(self, *args, **kwargs) + + ServeUsageTag.FASTAPI_USED.record("1") + ASGIAppReplicaWrapper.__init__(self, frozen_app) + + async def __del__(self): + await ASGIAppReplicaWrapper.__del__(self) + + # Call user-defined destructor if defined. 
+ if hasattr(cls, "__del__"): + if inspect.iscoroutinefunction(cls.__del__): + await cls.__del__(self) + else: + cls.__del__(self) + + ASGIIngressWrapper.__name__ = cls.__name__ + if hasattr(frozen_app, "docs_url"): + ASGIIngressWrapper.__fastapi_docs_path__ = frozen_app.docs_url + + return ASGIIngressWrapper + + return decorator + + +@PublicAPI(stability="stable") +def deployment( + _func_or_class: Optional[Callable] = None, + name: Default[str] = DEFAULT.VALUE, + version: Default[str] = DEFAULT.VALUE, + num_replicas: Default[Optional[Union[int, str]]] = DEFAULT.VALUE, + route_prefix: Default[Union[str, None]] = DEFAULT.VALUE, + ray_actor_options: Default[Dict] = DEFAULT.VALUE, + placement_group_bundles: Default[List[Dict[str, float]]] = DEFAULT.VALUE, + placement_group_strategy: Default[str] = DEFAULT.VALUE, + max_replicas_per_node: Default[int] = DEFAULT.VALUE, + user_config: Default[Optional[Any]] = DEFAULT.VALUE, + max_ongoing_requests: Default[int] = DEFAULT.VALUE, + max_queued_requests: Default[int] = DEFAULT.VALUE, + autoscaling_config: Default[Union[Dict, AutoscalingConfig, None]] = DEFAULT.VALUE, + graceful_shutdown_wait_loop_s: Default[float] = DEFAULT.VALUE, + graceful_shutdown_timeout_s: Default[float] = DEFAULT.VALUE, + health_check_period_s: Default[float] = DEFAULT.VALUE, + health_check_timeout_s: Default[float] = DEFAULT.VALUE, + logging_config: Default[Union[Dict, LoggingConfig, None]] = DEFAULT.VALUE, +) -> Callable[[Callable], Deployment]: + """Decorator that converts a Python class to a `Deployment`. + + Example: + + .. code-block:: python + + from ray import serve + + @serve.deployment(num_replicas=2) + class MyDeployment: + pass + + app = MyDeployment.bind() + + Args: + name: Name uniquely identifying this deployment within the application. + If not provided, the name of the class or function is used. + num_replicas: Number of replicas to run that handle requests to + this deployment. Defaults to 1. + autoscaling_config: Parameters to configure autoscaling behavior. If this + is set, `num_replicas` cannot be set. + ray_actor_options: Options to pass to the Ray Actor decorator, such as + resource requirements. Valid options are: `accelerator_type`, `memory`, + `num_cpus`, `num_gpus`, `resources`, and `runtime_env`. + placement_group_bundles: Defines a set of placement group bundles to be + scheduled *for each replica* of this deployment. The replica actor will + be scheduled in the first bundle provided, so the resources specified in + `ray_actor_options` must be a subset of the first bundle's resources. All + actors and tasks created by the replica actor will be scheduled in the + placement group by default (`placement_group_capture_child_tasks` is set + to True). + This cannot be set together with max_replicas_per_node. + placement_group_strategy: Strategy to use for the replica placement group + specified via `placement_group_bundles`. Defaults to `PACK`. + user_config: Config to pass to the reconfigure method of the deployment. This + can be updated dynamically without restarting the replicas of the + deployment. The user_config must be fully JSON-serializable. + max_ongoing_requests: Maximum number of requests that are sent to a + replica of this deployment without receiving a response. Defaults to 5. + max_queued_requests: [EXPERIMENTAL] Maximum number of requests to this + deployment that will be queued at each *caller* (proxy or DeploymentHandle). 
+            Once this limit is reached, subsequent requests will raise a
+            BackPressureError (for handles) or return an HTTP 503 status code (for HTTP
+            requests). Defaults to -1 (no limit).
+        health_check_period_s: Duration between health check calls for the replica.
+            Defaults to 10s. The health check is by default a no-op Actor call to the
+            replica, but you can define your own health check using the "check_health"
+            method in your deployment that raises an exception when unhealthy.
+        health_check_timeout_s: Duration in seconds that replicas wait for a health
+            check method to return before considering it failed. Defaults to 30s.
+        graceful_shutdown_wait_loop_s: Duration that replicas wait until there is
+            no more work to be done before shutting down. Defaults to 2s.
+        graceful_shutdown_timeout_s: Duration to wait for a replica to gracefully
+            shut down before being forcefully killed. Defaults to 20s.
+        max_replicas_per_node: The max number of replicas of this deployment that can
+            run on a single node. Valid values are None (default, no limit)
+            or an integer in the range of [1, 100].
+            This cannot be set together with placement_group_bundles.
+
+    Returns:
+        `Deployment`
+    """
+    if route_prefix is not DEFAULT.VALUE:
+        raise ValueError(
+            "`route_prefix` can no longer be specified at the deployment level. "
+            "Pass it to `serve.run` or in the application config instead."
+        )
+
+    if max_ongoing_requests is None:
+        raise ValueError("`max_ongoing_requests` must be non-null, got None.")
+
+    if num_replicas == "auto":
+        num_replicas = None
+        max_ongoing_requests, autoscaling_config = handle_num_replicas_auto(
+            max_ongoing_requests, autoscaling_config
+        )
+
+        ServeUsageTag.AUTO_NUM_REPLICAS_USED.record("1")
+
+    # NOTE: The user_configured_option_names should be the first thing that's
+    # defined in this function. It depends on the locals() dictionary storing
+    # only the function args/kwargs.
+    # Create a list of all user-configured options from the keyword args.
+    user_configured_option_names = [
+        option
+        for option, value in locals().items()
+        if option != "_func_or_class" and value is not DEFAULT.VALUE
+    ]
+
+    # The number of replicas should not be 0.
+    # TODO(Sihan): separate the num_replicas attribute between internal and API.
+    if num_replicas == 0:
+        raise ValueError("num_replicas is expected to be larger than 0")
+
+    if num_replicas not in [DEFAULT.VALUE, None, "auto"] and autoscaling_config not in [
+        DEFAULT.VALUE,
+        None,
+    ]:
+        raise ValueError(
+            "Manually setting num_replicas is not allowed when "
+            "autoscaling_config is provided."
+        )
+
+    if version is not DEFAULT.VALUE:
+        logger.warning(
+            "DeprecationWarning: `version` in `@serve.deployment` has been deprecated. "
+            "Explicitly specifying version will raise an error in the future!"
+        )
+
+    if isinstance(logging_config, LoggingConfig):
+        logging_config = logging_config.dict()
+
+    deployment_config = DeploymentConfig.from_default(
+        num_replicas=num_replicas if num_replicas is not None else 1,
+        user_config=user_config,
+        max_ongoing_requests=max_ongoing_requests,
+        max_queued_requests=max_queued_requests,
+        autoscaling_config=autoscaling_config,
+        graceful_shutdown_wait_loop_s=graceful_shutdown_wait_loop_s,
+        graceful_shutdown_timeout_s=graceful_shutdown_timeout_s,
+        health_check_period_s=health_check_period_s,
+        health_check_timeout_s=health_check_timeout_s,
+        logging_config=logging_config,
+    )
+    deployment_config.user_configured_option_names = set(user_configured_option_names)
+
+    def decorator(_func_or_class):
+        replica_config = ReplicaConfig.create(
+            _func_or_class,
+            init_args=None,
+            init_kwargs=None,
+            ray_actor_options=(
+                ray_actor_options if ray_actor_options is not DEFAULT.VALUE else None
+            ),
+            placement_group_bundles=(
+                placement_group_bundles
+                if placement_group_bundles is not DEFAULT.VALUE
+                else None
+            ),
+            placement_group_strategy=(
+                placement_group_strategy
+                if placement_group_strategy is not DEFAULT.VALUE
+                else None
+            ),
+            max_replicas_per_node=(
+                max_replicas_per_node
+                if max_replicas_per_node is not DEFAULT.VALUE
+                else None
+            ),
+        )
+
+        return Deployment(
+            name if name is not DEFAULT.VALUE else _func_or_class.__name__,
+            deployment_config,
+            replica_config,
+            version=(version if version is not DEFAULT.VALUE else None),
+            _internal=True,
+        )
+
+    # This handles both parametrized and non-parametrized usage of the
+    # decorator. See the @serve.batch code for more details.
+    return decorator(_func_or_class) if callable(_func_or_class) else decorator
+
+
+@PublicAPI(stability="stable")
+def _run(
+    target: Application,
+    *,
+    _blocking: bool = True,
+    name: str = SERVE_DEFAULT_APP_NAME,
+    route_prefix: Optional[str] = "/",
+    logging_config: Optional[Union[Dict, LoggingConfig]] = None,
+    _local_testing_mode: bool = False,
+) -> DeploymentHandle:
+    """Run an application and return a handle to its ingress deployment.
+
+    This is only used internally; unlike `serve.run` with `blocking=True`, its
+    `_blocking` flag does not block the following code indefinitely until Ctrl-C'd.
+    """
+    if len(name) == 0:
+        raise RayServeException("Application name must be a non-empty string.")
+
+    if not isinstance(target, Application):
+        raise TypeError(
+            "`serve.run` expects an `Application` returned by `Deployment.bind()`."
+        )
+
+    if RAY_SERVE_FORCE_LOCAL_TESTING_MODE:
+        if not _local_testing_mode:
+            logger.info("Overriding local_testing_mode=True from environment variable.")
+
+        _local_testing_mode = True
+
+    validate_route_prefix(route_prefix)
+
+    if _local_testing_mode:
+        configure_component_logger(
+            component_name="local_test",
+            component_id="-",
+            logging_config=logging_config or LoggingConfig(),
+            stream_handler_only=True,
+        )
+        built_app = build_app(
+            target,
+            name=name,
+            make_deployment_handle=make_local_deployment_handle,
+        )
+        handle = built_app.deployment_handles[built_app.ingress_deployment_name]
+    else:
+        client = _private_api.serve_start(
+            http_options={"location": "EveryNode"},
+            global_logging_config=logging_config,
+        )
+        # Record after Ray has been started.
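+        # (`ServeUsageTag.record` writes a telemetry tag via
+        # `record_extra_usage_tag`; see ray/serve/_private/usage.py above.)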
+ ServeUsageTag.API_VERSION.record("v2") + handle = client.deploy_application( + build_app( + target, + name=name, + default_runtime_env=ray.get_runtime_context().runtime_env, + ), + blocking=_blocking, + route_prefix=route_prefix, + logging_config=logging_config, + ) + + return handle + + +@PublicAPI(stability="stable") +def run( + target: Application, + blocking: bool = False, + name: str = SERVE_DEFAULT_APP_NAME, + route_prefix: Optional[str] = "/", + logging_config: Optional[Union[Dict, LoggingConfig]] = None, + _local_testing_mode: bool = False, +) -> DeploymentHandle: + """Run an application and return a handle to its ingress deployment. + + The application is returned by `Deployment.bind()`. Example: + + .. code-block:: python + + handle = serve.run(MyDeployment.bind()) + ray.get(handle.remote()) + + Args: + target: + A Serve application returned by `Deployment.bind()`. + blocking: Whether this call should be blocking. If True, it + will loop and log status until Ctrl-C'd. + name: Application name. If not provided, this will be the only + application running on the cluster (it will delete all others). + route_prefix: Route prefix for HTTP requests. Defaults to '/'. + If `None` is passed, the application will not be exposed over HTTP + (this may be useful if you only want the application to be exposed via + gRPC or a `DeploymentHandle`). + logging_config: Application logging config. If provided, the config will + be applied to all deployments which doesn't have logging config. + + Returns: + DeploymentHandle: A handle that can be used to call the application. + """ + handle = _run( + target=target, + name=name, + route_prefix=route_prefix, + logging_config=logging_config, + _local_testing_mode=_local_testing_mode, + ) + logger.info(f"Deployed app '{name}' successfully.") + + if blocking: + try: + while True: + # Block, letting Ray print logs to the terminal. + time.sleep(10) + except KeyboardInterrupt: + logger.warning("Got KeyboardInterrupt, exiting...") + # We need to re-raise KeyboardInterrupt, so serve components can be shutdown + # from the main script. + raise + return handle + + +@PublicAPI(stability="stable") +def delete(name: str, _blocking: bool = True): + """Delete an application by its name. + + Deletes the app with all corresponding deployments. + """ + client = _get_global_client() + client.delete_apps([name], blocking=_blocking) + + +@PublicAPI(stability="beta") +def multiplexed( + func: Optional[Callable[..., Any]] = None, max_num_models_per_replica: int = 3 +): + """Wrap a callable or method used to load multiplexed models in a replica. + + The function can be standalone function or a method of a class. The + function must have exactly one argument, the model id of type `str` for the + model to be loaded. + + It is required to define the function with `async def` and the function must be + an async function. It is recommended to define coroutines for long running + IO tasks in the function to avoid blocking the event loop. + + The multiplexed function is called to load a model with the given model ID when + necessary. + + When the number of models in one replica is larger than max_num_models_per_replica, + the models will be unloaded using an LRU policy. + + If you want to release resources after the model is loaded, you can define + a `__del__` method in your model class. The `__del__` method will be called when + the model is unloaded. + + Example: + + .. 
code-block:: python
+
+        from ray import serve
+
+        @serve.deployment
+        class MultiplexedDeployment:
+
+            def __init__(self):
+                # Define the s3 base path to load models from.
+                self.s3_base_path = "s3://my_bucket/my_models"
+
+            @serve.multiplexed(max_num_models_per_replica=5)
+            async def load_model(self, model_id: str) -> Any:
+                # Load the model with the given tag.
+                # You can use any model-loading library here
+                # and return the loaded model. load_from_s3 is
+                # a placeholder function.
+                return load_from_s3(model_id)
+
+            async def __call__(self, request):
+                # Get the model_id from the request context.
+                model_id = serve.get_multiplexed_model_id()
+                # Load the model for the requested model_id.
+                # If the model is already cached locally,
+                # this will just be a dictionary lookup.
+                model = await self.load_model(model_id)
+                return model(request)
+
+
+    Args:
+        max_num_models_per_replica: the maximum number of models
+            to be loaded on each replica. By default, it is 3, which
+            means that each replica can cache up to 3 models. You can
+            set it to a larger number if the node has enough memory,
+            or to a smaller number if you want to save memory on the node.
+    """
+
+    if func is not None:
+        if not callable(func):
+            raise TypeError(
+                "The `multiplexed` decorator must be used with a function or method."
+            )
+
+        # TODO(Sihan): Make the API accept the sync function as well.
+        # https://github.com/ray-project/ray/issues/35356
+        if not inspect.iscoroutinefunction(func):
+            raise TypeError(
+                "@serve.multiplexed can only be used to decorate async "
+                "functions or methods."
+            )
+        signature = inspect.signature(func)
+        if len(signature.parameters) == 0 or len(signature.parameters) > 2:
+            raise TypeError(
+                "@serve.multiplexed can only be used to decorate functions or methods "
+                "with exactly one 'model_id: str' argument (plus 'self' for methods)."
+            )
+
+    if not isinstance(max_num_models_per_replica, int):
+        raise TypeError("max_num_models_per_replica must be an integer.")
+
+    if max_num_models_per_replica != -1 and max_num_models_per_replica <= 0:
+        raise ValueError("max_num_models_per_replica must be positive.")
+
+    def _multiplex_decorator(func: Callable):
+        @wraps(func)
+        async def _multiplex_wrapper(*args):
+            args_check_error_msg = (
+                "Functions decorated with `@serve.multiplexed` must take exactly one "
+                "argument, the multiplexed model ID (str), but got {}"
+            )
+            if not args:
+                raise TypeError(args_check_error_msg.format("no arguments."))
+            self = extract_self_if_method_call(args, func)
+
+            # The user-defined multiplexed function can be a standalone function or a
+            # method of a class. If it is a method of a class, the first argument
+            # is self.
+            if self is None:
+                if len(args) != 1:
+                    raise TypeError(
+                        args_check_error_msg.format("more than one argument.")
+                    )
+                multiplex_object = func
+                model_id = args[0]
+            else:
+                # Count self as an argument.
+                if len(args) != 2:
+                    raise TypeError(
+                        args_check_error_msg.format("more than one argument.")
+                    )
+                multiplex_object = self
+                model_id = args[1]
+            multiplex_attr = "__serve_multiplex_wrapper"
+            # If the multiplexed function is called for the first time,
+            # create a model multiplex wrapper and cache it in the multiplex object.
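+            # The wrapper is stored on the deployment instance (or on the bare
+            # function for standalone functions), so each replica keeps a single
+            # LRU cache of up to max_num_models_per_replica models across calls.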
+ if not hasattr(multiplex_object, multiplex_attr): + model_multiplex_wrapper = _ModelMultiplexWrapper( + func, self, max_num_models_per_replica + ) + setattr(multiplex_object, multiplex_attr, model_multiplex_wrapper) + else: + model_multiplex_wrapper = getattr(multiplex_object, multiplex_attr) + return await model_multiplex_wrapper.load_model(model_id) + + return _multiplex_wrapper + + return _multiplex_decorator(func) if callable(func) else _multiplex_decorator + + +@PublicAPI(stability="beta") +def get_multiplexed_model_id() -> str: + """Get the multiplexed model ID for the current request. + + This is used with a function decorated with `@serve.multiplexed` + to retrieve the model ID for the current request. + + .. code-block:: python + + import ray + from ray import serve + import requests + + # Set the multiplexed model id with the key + # "ray_serve_multiplexed_model_id" in the request + # headers when sending requests to the http proxy. + requests.get("http://localhost:8000", + headers={"ray_serve_multiplexed_model_id": "model_1"}) + + # This can also be set when using `DeploymentHandle`. + handle.options(multiplexed_model_id="model_1").remote("blablabla") + + # In your deployment code, you can retrieve the model id from + # `get_multiplexed_model_id()`. + @serve.deployment + def my_deployment_function(request): + assert serve.get_multiplexed_model_id() == "model_1" + """ + _request_context = ray.serve.context._get_serve_request_context() + return _request_context.multiplexed_model_id + + +@PublicAPI(stability="alpha") +def status() -> ServeStatus: + """Get the status of Serve on the cluster. + + Includes status of all HTTP Proxies, all active applications, and + their deployments. + + .. code-block:: python + + @serve.deployment(num_replicas=2) + class MyDeployment: + pass + + serve.run(MyDeployment.bind()) + status = serve.status() + assert status.applications["default"].status == "RUNNING" + """ + + client = _get_global_client(raise_if_no_controller_running=False) + if client is None: + # Serve has not started yet + return ServeStatus() + + ServeUsageTag.SERVE_STATUS_API_USED.record("1") + details = ServeInstanceDetails(**client.get_serve_details()) + return details._get_status() + + +@PublicAPI(stability="alpha") +def get_app_handle(name: str) -> DeploymentHandle: + """Get a handle to the application's ingress deployment by name. + + Args: + name: Name of application to get a handle to. + + Raises: + RayServeException: If no Serve controller is running, or if the + application does not exist. + + .. code-block:: python + + import ray + from ray import serve + + @serve.deployment + def f(val: int) -> int: + return val * 2 + + serve.run(f.bind(), name="my_app") + handle = serve.get_app_handle("my_app") + assert handle.remote(3).result() == 6 + """ + + client = _get_global_client() + ingress = ray.get(client._controller.get_ingress_deployment_name.remote(name)) + if ingress is None: + raise RayServeException(f"Application '{name}' does not exist.") + + ServeUsageTag.SERVE_GET_APP_HANDLE_API_USED.record("1") + # There is no need to check if the deployment exists since the + # deployment name was just fetched from the controller + return client.get_handle(ingress, name, check_exists=False) + + +@DeveloperAPI +def get_deployment_handle( + deployment_name: str, + app_name: Optional[str] = None, + _check_exists: bool = True, + _record_telemetry: bool = True, +) -> DeploymentHandle: + """Get a handle to a deployment by name. 
+ + This is a developer API and is for advanced Ray users and library developers. + + Args: + deployment_name: Name of deployment to get a handle to. + app_name: Application in which deployment resides. If calling + from inside a Serve application and `app_name` is not + specified, this will default to the application from which + this API is called. + + Raises: + RayServeException: If no Serve controller is running, or if + calling from outside a Serve application and no application + name is specified. + + The following example gets the handle to the ingress deployment of + an application, which is equivalent to using `serve.get_app_handle`. + + .. testcode:: + + import ray + from ray import serve + + @serve.deployment + def f(val: int) -> int: + return val * 2 + + serve.run(f.bind(), name="my_app") + handle = serve.get_deployment_handle("f", app_name="my_app") + assert handle.remote(3).result() == 6 + + serve.shutdown() + + The following example demonstrates how you can use this API to get + the handle to a non-ingress deployment in an application. + + .. testcode:: + + import ray + from ray import serve + from ray.serve.handle import DeploymentHandle + + @serve.deployment + class Multiplier: + def __init__(self, multiple: int): + self._multiple = multiple + + def __call__(self, val: int) -> int: + return val * self._multiple + + @serve.deployment + class Adder: + def __init__(self, handle: DeploymentHandle, increment: int): + self._handle = handle + self._increment = increment + + async def __call__(self, val: int) -> int: + return await self._handle.remote(val) + self._increment + + + # The app calculates 2 * x + 3 + serve.run(Adder.bind(Multiplier.bind(2), 3), name="math_app") + handle = serve.get_app_handle("math_app") + assert handle.remote(5).result() == 13 + + # Get handle to Multiplier only + handle = serve.get_deployment_handle("Multiplier", app_name="math_app") + assert handle.remote(5).result() == 10 + + serve.shutdown() + """ + + client = _get_global_client() + + internal_replica_context = _get_internal_replica_context() + if app_name is None: + if internal_replica_context is None: + raise RayServeException( + "Please specify an application name when getting a deployment handle " + "outside of a Serve application." + ) + else: + app_name = internal_replica_context.app_name + + if _record_telemetry: + ServeUsageTag.SERVE_GET_DEPLOYMENT_HANDLE_API_USED.record("1") + + return client.get_handle(deployment_name, app_name, check_exists=_check_exists) diff --git a/.venv/lib/python3.11/site-packages/ray/serve/autoscaling_policy.py b/.venv/lib/python3.11/site-packages/ray/serve/autoscaling_policy.py new file mode 100644 index 0000000000000000000000000000000000000000..2cabe736a870ac23244f1fa028342bebf0935079 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/autoscaling_policy.py @@ -0,0 +1,159 @@ +import logging +import math +from typing import Any, Dict, Optional + +from ray.serve._private.constants import CONTROL_LOOP_INTERVAL_S, SERVE_LOGGER_NAME +from ray.serve.config import AutoscalingConfig +from ray.util.annotations import PublicAPI + +logger = logging.getLogger(SERVE_LOGGER_NAME) + + +def _calculate_desired_num_replicas( + autoscaling_config: AutoscalingConfig, + total_num_requests: int, + num_running_replicas: int, + override_min_replicas: Optional[float] = None, + override_max_replicas: Optional[float] = None, +) -> int: + """Returns the number of replicas to scale to based on the given metrics. 
+
+    Args:
+        autoscaling_config: The autoscaling parameters to use for this
+            calculation.
+        total_num_requests: The total number of requests running on and queued
+            to this deployment, assumed to already be time-averaged over the
+            desired lookback window.
+        num_running_replicas: The current number of running replicas.
+        override_min_replicas: Overrides min_replicas from the config
+            when calculating the final number of replicas.
+        override_max_replicas: Overrides max_replicas from the config
+            when calculating the final number of replicas.
+
+    Returns:
+        desired_num_replicas: The desired number of replicas to scale to, based
+            on the input metrics and the current number of replicas.
+
+    """
+    if num_running_replicas == 0:
+        raise ValueError("Number of replicas cannot be zero")
+
+    # Example: if error_ratio == 2.0, we have two times too many ongoing
+    # requests per replica, so we desire twice as many replicas.
+    target_num_requests = (
+        autoscaling_config.get_target_ongoing_requests() * num_running_replicas
+    )
+    error_ratio: float = total_num_requests / target_num_requests
+
+    # If error ratio >= 1, then the number of ongoing requests per
+    # replica exceeds the target and we will make an upscale decision,
+    # so we apply the upscale smoothing factor. Otherwise, the number of
+    # ongoing requests per replica is lower than the target and we will
+    # make a downscale decision, so we apply the downscale smoothing
+    # factor.
+    if error_ratio >= 1:
+        scaling_factor = autoscaling_config.get_upscaling_factor()
+    else:
+        scaling_factor = autoscaling_config.get_downscaling_factor()
+
+    # Multiply the distance to 1 by the smoothing ("gain") factor (default=1).
+    smoothed_error_ratio = 1 + ((error_ratio - 1) * scaling_factor)
+    desired_num_replicas = math.ceil(num_running_replicas * smoothed_error_ratio)
+
+    # If the desired num replicas is "stuck" because of the smoothing factor
+    # (meaning the traffic is low enough for the replicas to downscale
+    # without the smoothing factor), decrease desired_num_replicas by 1.
+    if (
+        math.ceil(num_running_replicas * error_ratio) < num_running_replicas
+        and desired_num_replicas == num_running_replicas
+    ):
+        desired_num_replicas -= 1
+
+    min_replicas = autoscaling_config.min_replicas
+    max_replicas = autoscaling_config.max_replicas
+    if override_min_replicas is not None:
+        min_replicas = override_min_replicas
+    if override_max_replicas is not None:
+        max_replicas = override_max_replicas
+
+    # Ensure min_replicas <= desired_num_replicas <= max_replicas.
+    desired_num_replicas = max(min_replicas, min(max_replicas, desired_num_replicas))
+
+    return desired_num_replicas
+
+
+@PublicAPI(stability="alpha")
+def replica_queue_length_autoscaling_policy(
+    curr_target_num_replicas: int,
+    total_num_requests: int,
+    num_running_replicas: int,
+    config: Optional[AutoscalingConfig],
+    capacity_adjusted_min_replicas: int,
+    capacity_adjusted_max_replicas: int,
+    policy_state: Dict[str, Any],
+) -> int:
+    """The default autoscaling policy based on basic thresholds for scaling.
+
+    There is a minimum threshold for the average queue length in the cluster
+    to scale up and a maximum threshold to scale down. Each period, a 'scale
+    up' or 'scale down' decision is made. This decision must be made for a
+    specified number of periods in a row before the number of replicas is
+    actually scaled. See the config options for more details. Assumes the
+    policy is called once every CONTROL_LOOP_INTERVAL_S seconds.
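+
+    For example, with upscale_delay_s=30.0, an upscale decision must repeat for
+    roughly 30 / CONTROL_LOOP_INTERVAL_S consecutive control-loop iterations
+    before replicas are actually added (see the decision_counter logic below).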
+ """ + decision_counter = policy_state.get("decision_counter", 0) + if num_running_replicas == 0: + # When 0 replicas and queries are queued, scale up the replicas + if total_num_requests > 0: + return max( + math.ceil(1 * config.get_upscaling_factor()), + curr_target_num_replicas, + ) + return curr_target_num_replicas + + decision_num_replicas = curr_target_num_replicas + + desired_num_replicas = _calculate_desired_num_replicas( + config, + total_num_requests, + num_running_replicas=num_running_replicas, + override_min_replicas=capacity_adjusted_min_replicas, + override_max_replicas=capacity_adjusted_max_replicas, + ) + # Scale up. + if desired_num_replicas > curr_target_num_replicas: + # If the previous decision was to scale down (the counter was + # negative), we reset it and then increment it (set to 1). + # Otherwise, just increment. + if decision_counter < 0: + decision_counter = 0 + decision_counter += 1 + + # Only actually scale the replicas if we've made this decision for + # 'scale_up_consecutive_periods' in a row. + if decision_counter > int(config.upscale_delay_s / CONTROL_LOOP_INTERVAL_S): + decision_counter = 0 + decision_num_replicas = desired_num_replicas + + # Scale down. + elif desired_num_replicas < curr_target_num_replicas: + # If the previous decision was to scale up (the counter was + # positive), reset it to zero before decrementing. + if decision_counter > 0: + decision_counter = 0 + decision_counter -= 1 + + # Only actually scale the replicas if we've made this decision for + # 'scale_down_consecutive_periods' in a row. + if decision_counter < -int(config.downscale_delay_s / CONTROL_LOOP_INTERVAL_S): + decision_counter = 0 + decision_num_replicas = desired_num_replicas + + # Do nothing. + else: + decision_counter = 0 + + policy_state["decision_counter"] = decision_counter + return decision_num_replicas + + +default_autoscaling_policy = replica_queue_length_autoscaling_policy diff --git a/.venv/lib/python3.11/site-packages/ray/serve/batching.py b/.venv/lib/python3.11/site-packages/ray/serve/batching.py new file mode 100644 index 0000000000000000000000000000000000000000..456cbcf36835cc69b6d1c362f53e4b135d3719d7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/batching.py @@ -0,0 +1,698 @@ +import asyncio +import io +import logging +import time +from collections import deque +from dataclasses import dataclass +from functools import wraps +from inspect import isasyncgenfunction, iscoroutinefunction +from typing import ( + Any, + AsyncGenerator, + Callable, + Coroutine, + Dict, + Generic, + Iterable, + List, + Literal, + Optional, + Protocol, + Tuple, + TypeVar, + overload, +) + +from ray import serve +from ray._private.signature import extract_signature, flatten_args, recover_args +from ray._private.utils import get_or_create_event_loop +from ray.serve._private.constants import SERVE_LOGGER_NAME +from ray.serve._private.utils import extract_self_if_method_call +from ray.serve.exceptions import RayServeException +from ray.util.annotations import PublicAPI + +logger = logging.getLogger(SERVE_LOGGER_NAME) + + +# The user can return these values in their streaming batch handler function to +# indicate that a request is finished, so Serve can terminate the request. 
+USER_CODE_STREAMING_SENTINELS = [StopIteration, StopAsyncIteration] + + +@dataclass +class _SingleRequest: + self_arg: Any + flattened_args: List[Any] + future: asyncio.Future + + +@dataclass +class _GeneratorResult: + result: Any + next_future: asyncio.Future + + +def _batch_args_kwargs( + list_of_flattened_args: List[List[Any]], +) -> Tuple[Tuple[Any], Dict[Any, Any]]: + """Batch a list of flatten args and returns regular args and kwargs""" + # Ray's flatten arg format is a list with alternating key and values + # e.g. args=(1, 2), kwargs={"key": "val"} got turned into + # [None, 1, None, 2, "key", "val"] + arg_lengths = {len(args) for args in list_of_flattened_args} + assert ( + len(arg_lengths) == 1 + ), "All batch requests should have the same number of parameters." + arg_length = arg_lengths.pop() + + batched_flattened_args = [] + for idx in range(arg_length): + if idx % 2 == 0: + batched_flattened_args.append(list_of_flattened_args[0][idx]) + else: + batched_flattened_args.append( + [item[idx] for item in list_of_flattened_args] + ) + + return recover_args(batched_flattened_args) + + +class _BatchQueue: + def __init__( + self, + max_batch_size: int, + batch_wait_timeout_s: float, + handle_batch_func: Optional[Callable] = None, + ) -> None: + """Async queue that accepts individual items and returns batches. + + Respects max_batch_size and timeout_s; a batch will be returned when + max_batch_size elements are available or the timeout has passed since + the previous get. + + If handle_batch_func is passed in, a background coroutine will run to + poll from the queue and call handle_batch_func on the results. + + Cannot be pickled. + + Arguments: + max_batch_size: max number of elements to return in a batch. + timeout_s: time to wait before returning an incomplete + batch. + handle_batch_func(Optional[Callable]): callback to run in the + background to handle batches if provided. + """ + self.queue: asyncio.Queue[_SingleRequest] = asyncio.Queue() + self.max_batch_size = max_batch_size + self.batch_wait_timeout_s = batch_wait_timeout_s + self.requests_available_event = asyncio.Event() + + # Used for observability. + self.curr_iteration_start_time = time.time() + + self._handle_batch_task = None + self._loop = get_or_create_event_loop() + if handle_batch_func is not None: + self._handle_batch_task = self._loop.create_task( + self._process_batches(handle_batch_func) + ) + self._warn_if_max_batch_size_exceeds_max_ongoing_requests() + + def _warn_if_max_batch_size_exceeds_max_ongoing_requests(self): + """Helper to check whether the max_batch_size is bounded. + + Log a warning to configure `max_ongoing_requests` if it's bounded. + """ + max_ongoing_requests = ( + serve.get_replica_context()._deployment_config.max_ongoing_requests + ) + if max_ongoing_requests < self.max_batch_size: + logger.warning( + f"`max_batch_size` ({self.max_batch_size}) is larger than " + f"`max_ongoing_requests` ({max_ongoing_requests}). This means " + "the replica will never receive a full batch. Please update " + "`max_ongoing_requests` to be >= `max_batch_size`." 
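+                # (This check can only run lazily, inside the replica: the
+                # queue is constructed at runtime, where
+                # `serve.get_replica_context()` is available.)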
+            )
+
+    def set_max_batch_size(self, new_max_batch_size: int) -> None:
+        """Updates the queue's max_batch_size."""
+        self.max_batch_size = new_max_batch_size
+        self._warn_if_max_batch_size_exceeds_max_ongoing_requests()
+
+    def put(self, request: _SingleRequest) -> None:
+        self.queue.put_nowait(request)
+        self.requests_available_event.set()
+
+    async def wait_for_batch(self) -> List[Any]:
+        """Wait for a batch, respecting self.max_batch_size and
+        self.batch_wait_timeout_s.
+
+        Returns a batch of up to self.max_batch_size items. Waits for up to
+        self.batch_wait_timeout_s after receiving the first request that will
+        be in the next batch. After the timeout, returns as many items as are
+        ready.
+
+        Always returns a batch with at least one item - will block
+        indefinitely until an item comes in.
+        """
+
+        batch = []
+        batch.append(await self.queue.get())
+
+        # Cache the current max_batch_size and batch_wait_timeout_s for this batch.
+        max_batch_size = self.max_batch_size
+        batch_wait_timeout_s = self.batch_wait_timeout_s
+
+        # Wait batch_wait_timeout_s seconds for new queue arrivals.
+        batch_start_time = time.time()
+        while True:
+            remaining_batch_time_s = max(
+                batch_wait_timeout_s - (time.time() - batch_start_time), 0
+            )
+            try:
+                # Wait for new arrivals.
+                await asyncio.wait_for(
+                    self.requests_available_event.wait(), remaining_batch_time_s
+                )
+            except asyncio.TimeoutError:
+                pass
+
+            # Add all new arrivals to the batch.
+            while len(batch) < max_batch_size and not self.queue.empty():
+                batch.append(self.queue.get_nowait())
+
+            # Only clear the put event if the queue is empty. If it's not empty
+            # we can start constructing a new batch immediately in the next loop.
+            # The code that puts items into the queue runs on the same event loop
+            # as this code, so there's no race condition between the time we
+            # get objects in the queue (and clear the event) and when objects
+            # get added to the queue.
+            if self.queue.empty():
+                self.requests_available_event.clear()
+
+            if (
+                time.time() - batch_start_time >= batch_wait_timeout_s
+                or len(batch) >= max_batch_size
+            ):
+                break
+
+        return batch
+
+    def _validate_results(
+        self, results: Iterable[Any], input_batch_length: int
+    ) -> None:
+        if len(results) != input_batch_length:
+            raise RayServeException(
+                "Batched function doesn't preserve batch size. "
+                f"The input list has length {input_batch_length} but the "
+                f"returned list has length {len(results)}."
+            )
+
+    async def _consume_func_generator(
+        self,
+        func_generator: AsyncGenerator,
+        initial_futures: List[asyncio.Future],
+        input_batch_length: int,
+    ) -> None:
+        """Consumes the batch function generator.
+
+        This function only runs if the function decorated with @serve.batch
+        is a generator.
+        """
+
+        FINISHED_TOKEN = None
+
+        try:
+            futures = deque(initial_futures)
+            assert len(futures) == input_batch_length
+
+            async for results in func_generator:
+                self._validate_results(results, input_batch_length)
+                for idx in range(input_batch_length):
+                    result, future = results[idx], futures[0]
+
+                    if future is FINISHED_TOKEN:
+                        # This caller has already terminated.
+                        futures.append(FINISHED_TOKEN)
+                    elif result in USER_CODE_STREAMING_SENTINELS:
+                        # The user's code returned a sentinel. No values left
+                        # for this caller. Terminate iteration for the caller.
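+                        # Setting StopAsyncIteration on the future makes the
+                        # caller-side `batch_handler_generator` (defined below
+                        # in `batch`) exit its loop.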
+ _set_exception_if_not_done(future, StopAsyncIteration) + futures.append(FINISHED_TOKEN) + else: + next_future = get_or_create_event_loop().create_future() + _set_result_if_not_done( + future, _GeneratorResult(result, next_future) + ) + futures.append(next_future) + + # Remove processed future. We remove the future at the very + # end of the loop to ensure that if an exception occurs, + # all pending futures will get set in the `except` block. + futures.popleft() + + for future in futures: + if future is not FINISHED_TOKEN: + _set_exception_if_not_done(future, StopAsyncIteration) + except Exception as e: + for future in futures: + if future is not FINISHED_TOKEN: + _set_exception_if_not_done(future, e) + + async def _assign_func_results( + self, + func_future: asyncio.Future, + futures: List[asyncio.Future], + input_batch_length: int, + ): + """Assigns func's results to the list of futures.""" + + try: + results = await func_future + self._validate_results(results, input_batch_length) + for result, future in zip(results, futures): + _set_result_if_not_done(future, result) + except Exception as e: + for future in futures: + _set_exception_if_not_done(future, e) + + async def _process_batches(self, func: Callable) -> None: + """Loops infinitely and processes queued request batches.""" + + while not self._loop.is_closed(): + try: + self.curr_iteration_start_time = time.time() + await self._process_batch(func) + except Exception: + logger.exception( + "_process_batches asyncio task ran into an unexpected exception." + ) + + async def _process_batch(self, func: Callable) -> None: + """Processes queued request batch.""" + + batch: List[_SingleRequest] = await self.wait_for_batch() + # Remove requests that have been cancelled from the batch. If + # all requests have been cancelled, simply return and wait for + # the next batch. + batch = [req for req in batch if not req.future.cancelled()] + if len(batch) == 0: + return + + futures = [item.future for item in batch] + + # Most of the logic in the function should be wrapped in this try- + # except block, so the futures' exceptions can be set if an exception + # occurs. Otherwise, the futures' requests may hang indefinitely. + try: + self_arg = batch[0].self_arg + args, kwargs = _batch_args_kwargs([item.flattened_args for item in batch]) + + # Method call. + if self_arg is not None: + func_future_or_generator = func(self_arg, *args, **kwargs) + # Normal function call. + else: + func_future_or_generator = func(*args, **kwargs) + + if isasyncgenfunction(func): + func_generator = func_future_or_generator + await self._consume_func_generator(func_generator, futures, len(batch)) + else: + func_future = func_future_or_generator + await self._assign_func_results(func_future, futures, len(batch)) + + except Exception as e: + logger.exception("_process_batch ran into an unexpected exception.") + + for future in futures: + _set_exception_if_not_done(future, e) + + def __del__(self): + if ( + self._handle_batch_task is None + or not get_or_create_event_loop().is_running() + ): + return + + # TODO(edoakes): although we try to gracefully shutdown here, it still + # causes some errors when the process exits due to the asyncio loop + # already being destroyed. + self._handle_batch_task.cancel() + + +class _LazyBatchQueueWrapper: + """Stores a _BatchQueue and updates its settings. + + _BatchQueue cannot be pickled, you must construct it lazily + at runtime inside a replica. This class initializes a queue only upon + first access. 
+ """ + + def __init__( + self, + max_batch_size: int = 10, + batch_wait_timeout_s: float = 0.0, + handle_batch_func: Optional[Callable] = None, + ): + self._queue: Optional[_BatchQueue] = None + self.max_batch_size = max_batch_size + self.batch_wait_timeout_s = batch_wait_timeout_s + self.handle_batch_func = handle_batch_func + + @property + def queue(self) -> _BatchQueue: + """Returns _BatchQueue. + + Initializes queue when called for the first time. + """ + if self._queue is None: + self._queue = _BatchQueue( + self.max_batch_size, + self.batch_wait_timeout_s, + self.handle_batch_func, + ) + return self._queue + + def set_max_batch_size(self, new_max_batch_size: int) -> None: + """Updates queue's max_batch_size.""" + + self.max_batch_size = new_max_batch_size + + if self._queue is not None: + self._queue.set_max_batch_size(new_max_batch_size) + + def set_batch_wait_timeout_s(self, new_batch_wait_timeout_s: float) -> None: + self.batch_wait_timeout_s = new_batch_wait_timeout_s + + if self._queue is not None: + self._queue.batch_wait_timeout_s = new_batch_wait_timeout_s + + def get_max_batch_size(self) -> int: + return self.max_batch_size + + def get_batch_wait_timeout_s(self) -> float: + return self.batch_wait_timeout_s + + def _get_curr_iteration_start_time(self) -> Optional[float]: + """Gets current iteration's start time on default _BatchQueue implementation. + + Returns None if the batch handler doesn't use a default _BatchQueue. + """ + + if hasattr(self.queue, "curr_iteration_start_time"): + return self.queue.curr_iteration_start_time + else: + return None + + async def _is_batching_task_alive(self) -> bool: + """Gets whether default _BatchQueue's background task is alive. + + Returns False if the batch handler doesn't use a default _BatchQueue. + """ + + if hasattr(self.queue, "_handle_batch_task"): + return not self.queue._handle_batch_task.done() + else: + return False + + async def _get_handling_task_stack(self) -> Optional[str]: + """Gets the stack for the default _BatchQueue's background task. + + Returns empty string if the batch handler doesn't use a default _BatchQueue. + """ + + if hasattr(self.queue, "_handle_batch_task"): + str_buffer = io.StringIO() + self.queue._handle_batch_task.print_stack(file=str_buffer) + return str_buffer.getvalue() + else: + return None + + +def _validate_max_batch_size(max_batch_size): + if not isinstance(max_batch_size, int): + if isinstance(max_batch_size, float) and max_batch_size.is_integer(): + max_batch_size = int(max_batch_size) + else: + raise TypeError( + f"max_batch_size must be integer >= 1, got {max_batch_size}" + ) + + if max_batch_size < 1: + raise ValueError( + f"max_batch_size must be an integer >= 1, got {max_batch_size}" + ) + + +def _validate_batch_wait_timeout_s(batch_wait_timeout_s): + if not isinstance(batch_wait_timeout_s, (float, int)): + raise TypeError( + "batch_wait_timeout_s must be a float >= 0, " f"got {batch_wait_timeout_s}" + ) + + if batch_wait_timeout_s < 0: + raise ValueError( + "batch_wait_timeout_s must be a float >= 0, " f"got {batch_wait_timeout_s}" + ) + + +SelfType = TypeVar("SelfType", contravariant=True) +T = TypeVar("T") +R = TypeVar("R") + + +class _SyncBatchingMethod(Protocol, Generic[SelfType, T, R]): + def __call__(self, self_: SelfType, __batch: List[T], /) -> List[R]: + ... + + +class _AsyncBatchingMethod(Protocol, Generic[SelfType, T, R]): + async def __call__(self, self_: SelfType, __batch: List[T], /) -> List[R]: + ... 
+
+
+@overload  # Sync function for `batch` called WITHOUT arguments
+def batch(_sync_func: Callable[[List[T]], List[R]], /) -> Callable[[T], R]:
+    ...
+
+
+@overload  # Async function for `batch` called WITHOUT arguments
+def batch(
+    _async_func: Callable[[List[T]], Coroutine[Any, Any, List[R]]], /
+) -> Callable[[T], Coroutine[Any, Any, R]]:
+    ...
+
+
+@overload  # Sync method for `batch` called WITHOUT arguments
+def batch(
+    _sync_meth: _SyncBatchingMethod[SelfType, T, R], /
+) -> Callable[[SelfType, T], R]:
+    ...
+
+
+@overload  # Async method for `batch` called WITHOUT arguments
+def batch(
+    _async_meth: _AsyncBatchingMethod[SelfType, T, R], /
+) -> Callable[[SelfType, T], Coroutine[Any, Any, R]]:
+    ...
+
+
+@overload  # `batch` called WITH arguments
+def batch(
+    _: Literal[None] = None,
+    /,
+    max_batch_size: int = 10,
+    batch_wait_timeout_s: float = 0.0,
+) -> "_BatchDecorator":
+    ...
+
+
+class _BatchDecorator(Protocol):
+    """Describes the behavior of the decorator produced by calling `batch`
+    with arguments."""
+
+    @overload  # Sync function
+    def __call__(self, _sync_func: Callable[[List[T]], List[R]], /) -> Callable[[T], R]:
+        ...
+
+    @overload  # Async function
+    def __call__(
+        self, _async_func: Callable[[List[T]], Coroutine[Any, Any, List[R]]], /
+    ) -> Callable[[T], Coroutine[Any, Any, R]]:
+        ...
+
+    @overload  # Sync method
+    def __call__(
+        self, _sync_meth: _SyncBatchingMethod[SelfType, T, R], /
+    ) -> Callable[[SelfType, T], R]:
+        ...
+
+    @overload  # Async method
+    def __call__(
+        self, _async_meth: _AsyncBatchingMethod[SelfType, T, R], /
+    ) -> Callable[[SelfType, T], Coroutine[Any, Any, R]]:
+        ...
+
+
+@PublicAPI(stability="stable")
+def batch(
+    _func: Optional[Callable] = None,
+    /,
+    max_batch_size: int = 10,
+    batch_wait_timeout_s: float = 0.0,
+) -> Callable:
+    """Converts a function to asynchronously handle batches.
+
+    The function can be a standalone function or a class method. In both
+    cases, the function must be `async def` and take a list of objects as
+    its sole argument and return a list of the same length as a result.
+
+    When invoked, the caller passes a single object. These will be batched
+    and executed asynchronously once a batch of `max_batch_size` has
+    accumulated or `batch_wait_timeout_s` has elapsed, whichever occurs first.
+
+    `max_batch_size` and `batch_wait_timeout_s` can be updated using setter
+    methods on the batch_handler (`set_max_batch_size` and
+    `set_batch_wait_timeout_s`).
+
+    Example:
+
+    .. code-block:: python
+
+        from typing import List
+
+        from ray import serve
+        from starlette.requests import Request
+
+        @serve.deployment
+        class BatchedDeployment:
+            @serve.batch(max_batch_size=10, batch_wait_timeout_s=0.1)
+            async def batch_handler(self, requests: List[Request]) -> List[str]:
+                response_batch = []
+                for r in requests:
+                    name = (await r.json())["name"]
+                    response_batch.append(f"Hello {name}!")
+
+                return response_batch
+
+            def update_batch_params(self, max_batch_size, batch_wait_timeout_s):
+                self.batch_handler.set_max_batch_size(max_batch_size)
+                self.batch_handler.set_batch_wait_timeout_s(batch_wait_timeout_s)
+
+            async def __call__(self, request: Request):
+                return await self.batch_handler(request)
+
+        app = BatchedDeployment.bind()
+
+    Arguments:
+        max_batch_size: the maximum batch size that will be executed in
+            one call to the underlying function.
+        batch_wait_timeout_s: the maximum duration to wait for
+            `max_batch_size` elements before running the current batch.
+    """
+    # `_func` will be None in the case when the decorator is parametrized.
+ # See the comment at the end of this function for a detailed explanation. + if _func is not None: + if not callable(_func): + raise TypeError( + "@serve.batch can only be used to decorate functions or methods." + ) + + if not iscoroutinefunction(_func): + raise TypeError("Functions decorated with @serve.batch must be 'async def'") + + _validate_max_batch_size(max_batch_size) + _validate_batch_wait_timeout_s(batch_wait_timeout_s) + + def _batch_decorator(_func): + lazy_batch_queue_wrapper = _LazyBatchQueueWrapper( + max_batch_size, + batch_wait_timeout_s, + _func, + ) + + async def batch_handler_generator( + first_future: asyncio.Future, + ) -> AsyncGenerator: + """Generator that handles generator batch functions.""" + + future = first_future + while True: + try: + async_response: _GeneratorResult = await future + future = async_response.next_future + yield async_response.result + except StopAsyncIteration: + break + + def enqueue_request(args, kwargs) -> asyncio.Future: + flattened_args: List = flatten_args(extract_signature(_func), args, kwargs) + + # If the function is a method, remove self as an argument. + self = extract_self_if_method_call(args, _func) + if self is not None: + flattened_args = flattened_args[2:] + + batch_queue = lazy_batch_queue_wrapper.queue + + future = get_or_create_event_loop().create_future() + batch_queue.put(_SingleRequest(self, flattened_args, future)) + return future + + @wraps(_func) + def generator_batch_wrapper(*args, **kwargs): + first_future = enqueue_request(args, kwargs) + return batch_handler_generator(first_future) + + @wraps(_func) + async def batch_wrapper(*args, **kwargs): + # This will raise if the underlying call raised an exception. + return await enqueue_request(args, kwargs) + + if isasyncgenfunction(_func): + wrapper = generator_batch_wrapper + else: + wrapper = batch_wrapper + + # We store the lazy_batch_queue_wrapper's getters and setters as + # batch_wrapper attributes, so they can be accessed in user code. + wrapper._get_max_batch_size = lazy_batch_queue_wrapper.get_max_batch_size + wrapper._get_batch_wait_timeout_s = ( + lazy_batch_queue_wrapper.get_batch_wait_timeout_s + ) + wrapper.set_max_batch_size = lazy_batch_queue_wrapper.set_max_batch_size + wrapper.set_batch_wait_timeout_s = ( + lazy_batch_queue_wrapper.set_batch_wait_timeout_s + ) + + # Store debugging methods in the lazy_batch_queue wrapper + wrapper._get_curr_iteration_start_time = ( + lazy_batch_queue_wrapper._get_curr_iteration_start_time + ) + wrapper._is_batching_task_alive = ( + lazy_batch_queue_wrapper._is_batching_task_alive + ) + wrapper._get_handling_task_stack = ( + lazy_batch_queue_wrapper._get_handling_task_stack + ) + + return wrapper + + # Unfortunately, this is required to handle both non-parametrized + # (@serve.batch) and parametrized (@serve.batch(**kwargs)) usage. + # In the former case, `serve.batch` will be called with the underlying + # function as the sole argument. In the latter case, it will first be + # called with **kwargs, then the result of that call will be called + # with the underlying function as the sole argument (i.e., it must be a + # "decorator factory."). 
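+    # Illustrative sketch of the two call forms (hypothetical function names):
+    #
+    #     @serve.batch                        # `_func` is `f`; the decorator
+    #     async def f(batch): ...             # is applied directly.
+    #
+    #     @serve.batch(max_batch_size=32)     # `_func` is None; `_batch_decorator`
+    #     async def g(batch): ...             # is returned, then applied to `g`.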
+ return _batch_decorator(_func) if callable(_func) else _batch_decorator + + +def _set_result_if_not_done(future: asyncio.Future, result: Any): + """Sets the future's result if the future is not done.""" + + if not future.done(): + future.set_result(result) + + +def _set_exception_if_not_done(future: asyncio.Future, exception: Any): + """Sets the future's exception if the future is not done.""" + + if not future.done(): + future.set_exception(exception) diff --git a/.venv/lib/python3.11/site-packages/ray/serve/config.py b/.venv/lib/python3.11/site-packages/ray/serve/config.py new file mode 100644 index 0000000000000000000000000000000000000000..e7189a1ceb1b60538999d8f555016fc0e5394ebd --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/config.py @@ -0,0 +1,339 @@ +import logging +import warnings +from enum import Enum +from typing import Any, Callable, List, Optional, Union + +from ray import cloudpickle +from ray._private.pydantic_compat import ( + BaseModel, + Field, + NonNegativeFloat, + NonNegativeInt, + PositiveFloat, + PositiveInt, + PrivateAttr, + validator, +) +from ray._private.utils import import_attr +from ray.serve._private.constants import ( + DEFAULT_AUTOSCALING_POLICY, + DEFAULT_GRPC_PORT, + DEFAULT_HTTP_HOST, + DEFAULT_HTTP_PORT, + DEFAULT_TARGET_ONGOING_REQUESTS, + DEFAULT_UVICORN_KEEP_ALIVE_TIMEOUT_S, + SERVE_LOGGER_NAME, +) +from ray.util.annotations import Deprecated, PublicAPI + +logger = logging.getLogger(SERVE_LOGGER_NAME) + + +@PublicAPI(stability="stable") +class AutoscalingConfig(BaseModel): + """Config for the Serve Autoscaler.""" + + # Please keep these options in sync with those in + # `src/ray/protobuf/serve.proto`. + + # Publicly exposed options + min_replicas: NonNegativeInt = 1 + initial_replicas: Optional[NonNegativeInt] = None + max_replicas: PositiveInt = 1 + + target_ongoing_requests: PositiveFloat = DEFAULT_TARGET_ONGOING_REQUESTS + + # How often to scrape for metrics + metrics_interval_s: PositiveFloat = 10.0 + # Time window to average over for metrics. + look_back_period_s: PositiveFloat = 30.0 + + # DEPRECATED + smoothing_factor: PositiveFloat = 1.0 + # DEPRECATED: replaced by `downscaling_factor` + upscale_smoothing_factor: Optional[PositiveFloat] = Field( + default=None, description="[DEPRECATED] Please use `upscaling_factor` instead." + ) + # DEPRECATED: replaced by `upscaling_factor` + downscale_smoothing_factor: Optional[PositiveFloat] = Field( + default=None, + description="[DEPRECATED] Please use `downscaling_factor` instead.", + ) + + # Multiplicative "gain" factor to limit scaling decisions + upscaling_factor: Optional[PositiveFloat] = None + downscaling_factor: Optional[PositiveFloat] = None + + # How frequently to make autoscaling decisions + # loop_period_s: float = CONTROL_LOOP_PERIOD_S + # How long to wait before scaling down replicas + downscale_delay_s: NonNegativeFloat = 600.0 + # How long to wait before scaling up replicas + upscale_delay_s: NonNegativeFloat = 30.0 + + # Cloudpickled policy definition. + _serialized_policy_def: bytes = PrivateAttr(default=b"") + + # Custom autoscaling config. Defaults to the request-based autoscaler. 
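+    # The policy may be provided either as a callable or as a string import
+    # path (e.g., the hypothetical "my_pkg.policies.my_policy"); string paths
+    # are resolved with `import_attr` and cloudpickled in `serialize_policy`.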
+ _policy: Union[str, Callable] = PrivateAttr(default=DEFAULT_AUTOSCALING_POLICY) + + @validator("max_replicas", always=True) + def replicas_settings_valid(cls, max_replicas, values): + min_replicas = values.get("min_replicas") + initial_replicas = values.get("initial_replicas") + if min_replicas is not None and max_replicas < min_replicas: + raise ValueError( + f"max_replicas ({max_replicas}) must be greater than " + f"or equal to min_replicas ({min_replicas})!" + ) + + if initial_replicas is not None: + if initial_replicas < min_replicas: + raise ValueError( + f"min_replicas ({min_replicas}) must be less than " + f"or equal to initial_replicas ({initial_replicas})!" + ) + elif initial_replicas > max_replicas: + raise ValueError( + f"max_replicas ({max_replicas}) must be greater than " + f"or equal to initial_replicas ({initial_replicas})!" + ) + + return max_replicas + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.serialize_policy() + + def serialize_policy(self) -> None: + """Serialize policy with cloudpickle. + + Import the policy if it's passed in as a string import path. Then cloudpickle + the policy and set `serialized_policy_def` if it's empty. + """ + values = self.dict() + policy = values.get("_policy") + if isinstance(policy, Callable): + policy = f"{policy.__module__}.{policy.__name__}" + + if not policy: + policy = DEFAULT_AUTOSCALING_POLICY + + policy_path = policy + policy = import_attr(policy) + + if not values.get("_serialized_policy_def"): + self._serialized_policy_def = cloudpickle.dumps(policy) + self._policy = policy_path + + @classmethod + def default(cls): + return cls( + target_ongoing_requests=DEFAULT_TARGET_ONGOING_REQUESTS, + min_replicas=1, + max_replicas=100, + ) + + def get_policy(self) -> Callable: + """Deserialize policy from cloudpickled bytes.""" + return cloudpickle.loads(self._serialized_policy_def) + + def get_upscaling_factor(self) -> PositiveFloat: + if self.upscaling_factor: + return self.upscaling_factor + + return self.upscale_smoothing_factor or self.smoothing_factor + + def get_downscaling_factor(self) -> PositiveFloat: + if self.downscaling_factor: + return self.downscaling_factor + + return self.downscale_smoothing_factor or self.smoothing_factor + + def get_target_ongoing_requests(self) -> PositiveFloat: + return self.target_ongoing_requests + + +# Keep in sync with ServeDeploymentMode in dashboard/client/src/type/serve.ts +@Deprecated +class DeploymentMode(str, Enum): + NoServer = "NoServer" + HeadOnly = "HeadOnly" + EveryNode = "EveryNode" + + +@PublicAPI(stability="stable") +class ProxyLocation(str, Enum): + """Config for where to run proxies to receive ingress traffic to the cluster. + + Options: + + - Disabled: don't run proxies at all. This should be used if you are only + making calls to your applications via deployment handles. + - HeadOnly: only run a single proxy on the head node. + - EveryNode: run a proxy on every node in the cluster that has at least one + replica actor. This is the default. + """ + + Disabled = "Disabled" + HeadOnly = "HeadOnly" + EveryNode = "EveryNode" + + @classmethod + def _to_deployment_mode( + cls, proxy_location: Union["ProxyLocation", str] + ) -> DeploymentMode: + if isinstance(proxy_location, str): + proxy_location = ProxyLocation(proxy_location) + elif not isinstance(proxy_location, ProxyLocation): + raise TypeError( + f"Must be a `ProxyLocation` or str, got: {type(proxy_location)}." 
+ ) + + if proxy_location == ProxyLocation.Disabled: + return DeploymentMode.NoServer + else: + return DeploymentMode(proxy_location.value) + + @classmethod + def _from_deployment_mode( + cls, deployment_mode: Optional[Union[DeploymentMode, str]] + ) -> Optional["ProxyLocation"]: + """Converts DeploymentMode enum into ProxyLocation enum. + + DeploymentMode is a deprecated version of ProxyLocation that's still + used internally throughout Serve. + """ + + if deployment_mode is None: + return None + elif isinstance(deployment_mode, str): + deployment_mode = DeploymentMode(deployment_mode) + elif not isinstance(deployment_mode, DeploymentMode): + raise TypeError( + f"Must be a `DeploymentMode` or str, got: {type(deployment_mode)}." + ) + + if deployment_mode == DeploymentMode.NoServer: + return ProxyLocation.Disabled + else: + return ProxyLocation(deployment_mode.value) + + +@PublicAPI(stability="stable") +class HTTPOptions(BaseModel): + """HTTP options for the proxies. Supported fields: + + - host: Host that the proxies listens for HTTP on. Defaults to + "127.0.0.1". To expose Serve publicly, you probably want to set + this to "0.0.0.0". + - port: Port that the proxies listen for HTTP on. Defaults to 8000. + - root_path: An optional root path to mount the serve application + (for example, "/prefix"). All deployment routes are prefixed + with this path. + - request_timeout_s: End-to-end timeout for HTTP requests. + - keep_alive_timeout_s: Duration to keep idle connections alive when no + requests are ongoing. + + - location: [DEPRECATED: use `proxy_location` field instead] The deployment + location of HTTP servers: + + - "HeadOnly": start one HTTP server on the head node. Serve + assumes the head node is the node you executed serve.start + on. This is the default. + - "EveryNode": start one HTTP server per node. + - "NoServer": disable HTTP server. + + - num_cpus: [DEPRECATED] The number of CPU cores to reserve for each + internal Serve HTTP proxy actor. + """ + + host: Optional[str] = DEFAULT_HTTP_HOST + port: int = DEFAULT_HTTP_PORT + middlewares: List[Any] = [] + location: Optional[DeploymentMode] = DeploymentMode.HeadOnly + num_cpus: int = 0 + root_url: str = "" + root_path: str = "" + request_timeout_s: Optional[float] = None + keep_alive_timeout_s: int = DEFAULT_UVICORN_KEEP_ALIVE_TIMEOUT_S + + @validator("location", always=True) + def location_backfill_no_server(cls, v, values): + if values["host"] is None or v is None: + return DeploymentMode.NoServer + + return v + + @validator("middlewares", always=True) + def warn_for_middlewares(cls, v, values): + if v: + warnings.warn( + "Passing `middlewares` to HTTPOptions is deprecated and will be " + "removed in a future version. Consider using the FastAPI integration " + "to configure middlewares on your deployments: " + "https://docs.ray.io/en/latest/serve/http-guide.html#fastapi-http-deployments" # noqa 501 + ) + return v + + @validator("num_cpus", always=True) + def warn_for_num_cpus(cls, v, values): + if v: + warnings.warn( + "Passing `num_cpus` to HTTPOptions is deprecated and will be " + "removed in a future version." + ) + return v + + class Config: + validate_assignment = True + arbitrary_types_allowed = True + + +@PublicAPI(stability="alpha") +class gRPCOptions(BaseModel): + """gRPC options for the proxies. Supported fields: + + Args: + port (int): + Port for gRPC server if started. Default to 9000. Cannot be + updated once Serve has started running. Serve must be shut down and + restarted with the new port instead. 
+        grpc_servicer_functions (List[str]):
+            List of import paths for gRPC `add_servicer_to_server` functions to add to
+            Serve's gRPC proxy. Defaults to an empty list, which means no gRPC methods
+            are added and no gRPC server is started. The servicer functions need to
+            be importable from the context of where Serve is running.
+    """
+
+    port: int = DEFAULT_GRPC_PORT
+    grpc_servicer_functions: List[str] = []
+
+    @property
+    def grpc_servicer_func_callable(self) -> List[Callable]:
+        """Return a list of callable functions from the grpc_servicer_functions.
+
+        Raises an error if an entry is not callable or cannot be imported.
+        """
+        callables = []
+        for func in self.grpc_servicer_functions:
+            try:
+                imported_func = import_attr(func)
+                if callable(imported_func):
+                    callables.append(imported_func)
+                else:
+                    message = (
+                        f"{func} is not a callable function! Please make sure "
+                        "the function is imported correctly."
+                    )
+                    raise ValueError(message)
+            except ModuleNotFoundError as e:
+                message = (
+                    f"{func} can't be imported! Please make sure there are no typos "
+                    "in the import paths. You may also need to rebuild the service "
+                    "definitions if the .proto file has changed."
+                )
+                raise ModuleNotFoundError(message) from e
+
+        return callables
diff --git a/.venv/lib/python3.11/site-packages/ray/serve/context.py b/.venv/lib/python3.11/site-packages/ray/serve/context.py
new file mode 100644
index 0000000000000000000000000000000000000000..8febf452df26c1b3e8b6f11759d808655410d416
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/ray/serve/context.py
@@ -0,0 +1,261 @@
+"""
+This file stores global state for a Serve application. Deployment replicas
+can use this state to access metadata or the Serve controller.
+"""
+
+import asyncio
+import contextvars
+import logging
+from collections import defaultdict
+from dataclasses import dataclass
+from typing import Callable, Dict, Optional
+
+import ray
+from ray.exceptions import RayActorError
+from ray.serve._private.client import ServeControllerClient
+from ray.serve._private.common import ReplicaID
+from ray.serve._private.config import DeploymentConfig
+from ray.serve._private.constants import (
+    SERVE_CONTROLLER_NAME,
+    SERVE_LOGGER_NAME,
+    SERVE_NAMESPACE,
+)
+from ray.serve.exceptions import RayServeException
+from ray.serve.grpc_util import RayServegRPCContext
+from ray.util.annotations import DeveloperAPI
+
+logger = logging.getLogger(SERVE_LOGGER_NAME)
+
+_INTERNAL_REPLICA_CONTEXT: "ReplicaContext" = None
+_global_client: ServeControllerClient = None
+
+
+@DeveloperAPI
+@dataclass
+class ReplicaContext:
+    """Stores runtime context info for replicas.
+
+    Fields:
+        - app_name: name of the application the replica is a part of.
+        - deployment: name of the deployment the replica is a part of.
+        - replica_tag: unique ID for the replica.
+        - servable_object: instance of the user class/function this replica is running.
+    """
+
+    replica_id: ReplicaID
+    servable_object: Callable
+    _deployment_config: DeploymentConfig
+
+    @property
+    def app_name(self) -> str:
+        return self.replica_id.deployment_id.app_name
+
+    @property
+    def deployment(self) -> str:
+        return self.replica_id.deployment_id.name
+
+    @property
+    def replica_tag(self) -> str:
+        return self.replica_id.unique_id
+
+
+def _get_global_client(
+    _health_check_controller: bool = False, raise_if_no_controller_running: bool = True
+) -> Optional[ServeControllerClient]:
+    """Gets the global client, which stores the controller's handle.
+ + Args: + _health_check_controller: If True, run a health check on the + cached controller if it exists. If the check fails, try reconnecting + to the controller. + raise_if_no_controller_running: Whether to raise an exception if + there is no currently running Serve controller. + + Returns: + ServeControllerClient to the running Serve controller. If there + is no running controller and raise_if_no_controller_running is + set to False, returns None. + + Raises: + RayServeException: if there is no running Serve controller actor + and raise_if_no_controller_running is set to True. + """ + + try: + if _global_client is not None: + if _health_check_controller: + ray.get(_global_client._controller.check_alive.remote()) + return _global_client + except RayActorError: + logger.info("The cached controller has died. Reconnecting.") + _set_global_client(None) + + return _connect(raise_if_no_controller_running) + + +def _set_global_client(client): + global _global_client + _global_client = client + + +def _get_internal_replica_context(): + return _INTERNAL_REPLICA_CONTEXT + + +def _set_internal_replica_context( + *, + replica_id: ReplicaID, + servable_object: Callable, + _deployment_config: DeploymentConfig, +): + global _INTERNAL_REPLICA_CONTEXT + _INTERNAL_REPLICA_CONTEXT = ReplicaContext( + replica_id=replica_id, + servable_object=servable_object, + _deployment_config=_deployment_config, + ) + + +def _connect(raise_if_no_controller_running: bool = True) -> ServeControllerClient: + """Connect to an existing Serve application on this Ray cluster. + + If called from within a replica, this will connect to the same Serve + app that the replica is running in. + + Returns: + ServeControllerClient that encapsulates a Ray actor handle to the + existing Serve application's Serve Controller. None if there is + no running Serve controller actor and raise_if_no_controller_running + is set to False. + Raises: + RayServeException: if there is no running Serve controller actor + and raise_if_no_controller_running is set to True. + """ + + # Initialize ray if needed. + ray._private.worker.global_worker._filter_logs_by_job = False + if not ray.is_initialized(): + ray.init(namespace=SERVE_NAMESPACE) + + # Try to get serve controller if it exists + try: + controller = ray.get_actor(SERVE_CONTROLLER_NAME, namespace=SERVE_NAMESPACE) + except ValueError: + if raise_if_no_controller_running: + raise RayServeException( + "There is no Serve instance running on this Ray cluster." + ) + return + + client = ServeControllerClient( + controller, + ) + _set_global_client(client) + return client + + +# Serve request context var which is used for storing the internal +# request context information. +# route_prefix: http url route path, e.g. http://127.0.0.1:/app +# the route is "/app". When you send requests by handle, +# the route is empty. +# request_id: the request id is generated from http proxy, the value +# shouldn't be changed when the variable is set. +# This can be from the client and is used for logging. +# _internal_request_id: the request id is generated from the proxy. Used to track the +# request objects in the system. +# note: +# The request context is readonly to avoid potential +# async task conflicts when using it concurrently. 
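+
+# Because `_RequestContext` below is a frozen dataclass held in a
+# `contextvars.ContextVar`, an "update" builds a new instance that merges the
+# new values over the current ones and calls `.set()` (see
+# `_set_request_context`); concurrent asyncio tasks each observe their own
+# value of the variable.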
+ + +@dataclass(frozen=True) +class _RequestContext: + route: str = "" + request_id: str = "" + _internal_request_id: str = "" + app_name: str = "" + multiplexed_model_id: str = "" + grpc_context: Optional[RayServegRPCContext] = None + is_http_request: bool = False + + +_serve_request_context = contextvars.ContextVar( + "Serve internal request context variable", default=None +) + + +def _get_serve_request_context(): + """Get the current request context. + + Returns: + The current request context + """ + + if _serve_request_context.get() is None: + _serve_request_context.set(_RequestContext()) + return _serve_request_context.get() + + +def _set_request_context( + route: str = "", + request_id: str = "", + _internal_request_id: str = "", + app_name: str = "", + multiplexed_model_id: str = "", +): + """Set the request context. If the value is not set, + the current context value will be used.""" + + current_request_context = _get_serve_request_context() + + _serve_request_context.set( + _RequestContext( + route=route or current_request_context.route, + request_id=request_id or current_request_context.request_id, + _internal_request_id=_internal_request_id + or current_request_context._internal_request_id, + app_name=app_name or current_request_context.app_name, + multiplexed_model_id=multiplexed_model_id + or current_request_context.multiplexed_model_id, + ) + ) + + +# `_requests_pending_assignment` is a map from request ID to a +# dictionary of asyncio tasks. +# The request ID points to an ongoing request that is executing on the +# current replica, and the asyncio tasks are ongoing tasks started on +# the router to assign child requests to downstream replicas. + +# A dictionary is used over a set to track the asyncio tasks for more +# efficient addition and deletion time complexity. A uniquely generated +# `response_id` is used to identify each task. + +_requests_pending_assignment: Dict[str, Dict[str, asyncio.Task]] = defaultdict(dict) + + +# Note that the functions below that manipulate +# `_requests_pending_assignment` are NOT thread-safe. They are only +# expected to be called from the same thread/asyncio event-loop. + + +def _get_requests_pending_assignment(parent_request_id: str) -> Dict[str, asyncio.Task]: + if parent_request_id in _requests_pending_assignment: + return _requests_pending_assignment[parent_request_id] + + return {} + + +def _add_request_pending_assignment(parent_request_id: str, response_id: str, task): + # NOTE: `parent_request_id` is the `internal_request_id` corresponding + # to an ongoing Serve request, so it is always non-empty. 
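+    # Illustrative lifecycle: the router generates a unique `response_id`,
+    # registers its assignment task here, and calls
+    # `_remove_request_pending_assignment` below once the child request has
+    # been assigned (or cancelled), pruning the per-request dict.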
+ _requests_pending_assignment[parent_request_id][response_id] = task + + +def _remove_request_pending_assignment(parent_request_id: str, response_id: str): + if response_id in _requests_pending_assignment[parent_request_id]: + del _requests_pending_assignment[parent_request_id][response_id] + + if len(_requests_pending_assignment[parent_request_id]) == 0: + del _requests_pending_assignment[parent_request_id] diff --git a/.venv/lib/python3.11/site-packages/ray/serve/dag.py b/.venv/lib/python3.11/site-packages/ray/serve/dag.py new file mode 100644 index 0000000000000000000000000000000000000000..aee604744557d18c88245fff723480562dbfae67 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/dag.py @@ -0,0 +1,5 @@ +from ray.dag.input_node import InputNode + +__all__ = [ + "InputNode", +] diff --git a/.venv/lib/python3.11/site-packages/ray/serve/deployment.py b/.venv/lib/python3.11/site-packages/ray/serve/deployment.py new file mode 100644 index 0000000000000000000000000000000000000000..e88f8a5dc1f0b6a1ceb564ce4d09b6719f93fc72 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/deployment.py @@ -0,0 +1,508 @@ +import inspect +import logging +from copy import deepcopy +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +from ray.serve._private.config import ( + DeploymentConfig, + ReplicaConfig, + handle_num_replicas_auto, +) +from ray.serve._private.constants import SERVE_LOGGER_NAME +from ray.serve._private.usage import ServeUsageTag +from ray.serve._private.utils import DEFAULT, Default +from ray.serve.config import AutoscalingConfig +from ray.serve.schema import DeploymentSchema, LoggingConfig, RayActorOptionsSchema +from ray.util.annotations import PublicAPI + +logger = logging.getLogger(SERVE_LOGGER_NAME) + + +@PublicAPI(stability="stable") +class Application: + """One or more deployments bound with arguments that can be deployed together. + + Can be passed into another `Deployment.bind()` to compose multiple deployments in a + single application, passed to `serve.run`, or deployed via a Serve config file. + + For example, to define an Application and run it in Python: + + .. code-block:: python + + from ray import serve + from ray.serve import Application + + @serve.deployment + class MyDeployment: + pass + + app: Application = MyDeployment.bind(OtherDeployment.bind()) + serve.run(app) + + To run the same app using the command line interface (CLI): + + .. code-block:: bash + + serve run python_file:app + + To deploy the same app via a config file: + + .. code-block:: yaml + + applications: + my_app: + import_path: python_file:app + + """ + + def __init__(self, bound_deployment: "Deployment"): + # This is used by `build_app`, but made private so users don't use it. + self._bound_deployment = bound_deployment + + +@PublicAPI(stability="stable") +class Deployment: + """Class (or function) decorated with the `@serve.deployment` decorator. + + This is run on a number of replica actors. Requests to those replicas call + this class. + + One or more deployments can be composed together into an `Application` which is + then run via `serve.run` or a config file. + + Example: + + .. code-block:: python + + @serve.deployment + class MyDeployment: + def __init__(self, name: str): + self._name = name + + def __call__(self, request): + return "Hello world!" + + app = MyDeployment.bind() + # Run via `serve.run` or the `serve run` CLI command. 
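+        # For example, with the CLI (hypothetical module name):
+        #     serve run python_file:app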
+ serve.run(app) + + """ + + def __init__( + self, + name: str, + deployment_config: DeploymentConfig, + replica_config: ReplicaConfig, + version: Optional[str] = None, + _internal=False, + ) -> None: + if not _internal: + raise RuntimeError( + "The Deployment constructor should not be called " + "directly. Use `@serve.deployment` instead." + ) + if not isinstance(name, str): + raise TypeError("name must be a string.") + if not (version is None or isinstance(version, str)): + raise TypeError("version must be a string.") + docs_path = None + if ( + inspect.isclass(replica_config.deployment_def) + and hasattr(replica_config.deployment_def, "__module__") + and replica_config.deployment_def.__module__ == "ray.serve.api" + and hasattr(replica_config.deployment_def, "__fastapi_docs_path__") + ): + docs_path = replica_config.deployment_def.__fastapi_docs_path__ + + self._name = name + self._version = version + self._deployment_config = deployment_config + self._replica_config = replica_config + self._docs_path = docs_path + + @property + def name(self) -> str: + """Unique name of this deployment.""" + return self._name + + @property + def version(self) -> Optional[str]: + return self._version + + @property + def func_or_class(self) -> Union[Callable, str]: + """Underlying class or function that this deployment wraps.""" + return self._replica_config.deployment_def + + @property + def num_replicas(self) -> int: + """Target number of replicas.""" + return self._deployment_config.num_replicas + + @property + def user_config(self) -> Any: + """Dynamic user-provided config options.""" + return self._deployment_config.user_config + + @property + def max_ongoing_requests(self) -> int: + """Max number of requests a replica can handle at once.""" + return self._deployment_config.max_ongoing_requests + + @property + def max_queued_requests(self) -> int: + """Max number of requests that can be queued in each deployment handle.""" + return self._deployment_config.max_queued_requests + + @property + def route_prefix(self): + raise ValueError( + "`route_prefix` can no longer be specified at the deployment level. " + "Pass it to `serve.run` or in the application config instead." + ) + + @property + def ray_actor_options(self) -> Optional[Dict]: + """Actor options such as resources required for each replica.""" + return self._replica_config.ray_actor_options + + @property + def init_args(self) -> Tuple[Any]: + return self._replica_config.init_args + + @property + def init_kwargs(self) -> Tuple[Any]: + return self._replica_config.init_kwargs + + @property + def url(self) -> Optional[str]: + logger.warning( + "DeprecationWarning: `Deployment.url` is deprecated " + "and will be removed in the future." + ) + return None + + @property + def logging_config(self) -> Dict: + return self._deployment_config.logging_config + + def set_logging_config(self, logging_config: Dict): + self._deployment_config.logging_config = logging_config + + def __call__(self): + raise RuntimeError( + "Deployments cannot be constructed directly. " + "Use `deployment.deploy() instead.`" + ) + + def bind(self, *args, **kwargs) -> Application: + """Bind the arguments to the deployment and return an Application. + + The returned Application can be deployed using `serve.run` (or via + config file) or bound to another deployment for composition. 
+ """ + return Application(self.options(_init_args=args, _init_kwargs=kwargs)) + + def options( + self, + func_or_class: Optional[Callable] = None, + name: Default[str] = DEFAULT.VALUE, + version: Default[str] = DEFAULT.VALUE, + num_replicas: Default[Optional[Union[int, str]]] = DEFAULT.VALUE, + route_prefix: Default[Union[str, None]] = DEFAULT.VALUE, + ray_actor_options: Default[Optional[Dict]] = DEFAULT.VALUE, + placement_group_bundles: Default[List[Dict[str, float]]] = DEFAULT.VALUE, + placement_group_strategy: Default[str] = DEFAULT.VALUE, + max_replicas_per_node: Default[int] = DEFAULT.VALUE, + user_config: Default[Optional[Any]] = DEFAULT.VALUE, + max_ongoing_requests: Default[int] = DEFAULT.VALUE, + max_queued_requests: Default[int] = DEFAULT.VALUE, + autoscaling_config: Default[ + Union[Dict, AutoscalingConfig, None] + ] = DEFAULT.VALUE, + graceful_shutdown_wait_loop_s: Default[float] = DEFAULT.VALUE, + graceful_shutdown_timeout_s: Default[float] = DEFAULT.VALUE, + health_check_period_s: Default[float] = DEFAULT.VALUE, + health_check_timeout_s: Default[float] = DEFAULT.VALUE, + logging_config: Default[Union[Dict, LoggingConfig, None]] = DEFAULT.VALUE, + _init_args: Default[Tuple[Any]] = DEFAULT.VALUE, + _init_kwargs: Default[Dict[Any, Any]] = DEFAULT.VALUE, + _internal: bool = False, + ) -> "Deployment": + """Return a copy of this deployment with updated options. + + Only those options passed in will be updated, all others will remain + unchanged from the existing deployment. + + Refer to the `@serve.deployment` decorator docs for available arguments. + """ + if route_prefix is not DEFAULT.VALUE: + raise ValueError( + "`route_prefix` can no longer be specified at the deployment level. " + "Pass it to `serve.run` or in the application config instead." + ) + + # Modify max_ongoing_requests and autoscaling_config if + # `num_replicas="auto"` + if max_ongoing_requests is None: + raise ValueError("`max_ongoing_requests` must be non-null, got None.") + if num_replicas == "auto": + num_replicas = None + max_ongoing_requests, autoscaling_config = handle_num_replicas_auto( + max_ongoing_requests, autoscaling_config + ) + + ServeUsageTag.AUTO_NUM_REPLICAS_USED.record("1") + + # NOTE: The user_configured_option_names should be the first thing that's + # defined in this method. It depends on the locals() dictionary storing + # only the function args/kwargs. + # Create list of all user-configured options from keyword args + user_configured_option_names = [ + option + for option, value in locals().items() + if option not in {"self", "func_or_class", "_internal"} + and value is not DEFAULT.VALUE + ] + + new_deployment_config = deepcopy(self._deployment_config) + if not _internal: + new_deployment_config.user_configured_option_names.update( + user_configured_option_names + ) + + if num_replicas not in [ + DEFAULT.VALUE, + None, + "auto", + ] and autoscaling_config not in [ + DEFAULT.VALUE, + None, + ]: + raise ValueError( + "Manually setting num_replicas is not allowed when " + "autoscaling_config is provided." + ) + + if num_replicas == 0: + raise ValueError("num_replicas is expected to larger than 0") + + if not _internal and version is not DEFAULT.VALUE: + logger.warning( + "DeprecationWarning: `version` in `Deployment.options()` has been " + "deprecated. Explicitly specifying version will raise an error in the " + "future!" 
+ ) + + elif num_replicas not in [DEFAULT.VALUE, None]: + new_deployment_config.num_replicas = num_replicas + + if user_config is not DEFAULT.VALUE: + new_deployment_config.user_config = user_config + + if max_ongoing_requests is not DEFAULT.VALUE: + new_deployment_config.max_ongoing_requests = max_ongoing_requests + + if max_queued_requests is not DEFAULT.VALUE: + new_deployment_config.max_queued_requests = max_queued_requests + + if func_or_class is None: + func_or_class = self._replica_config.deployment_def + + if name is DEFAULT.VALUE: + name = self._name + + if version is DEFAULT.VALUE: + version = self._version + + if _init_args is DEFAULT.VALUE: + _init_args = self._replica_config.init_args + + if _init_kwargs is DEFAULT.VALUE: + _init_kwargs = self._replica_config.init_kwargs + + if ray_actor_options is DEFAULT.VALUE: + ray_actor_options = self._replica_config.ray_actor_options + + if placement_group_bundles is DEFAULT.VALUE: + placement_group_bundles = self._replica_config.placement_group_bundles + + if placement_group_strategy is DEFAULT.VALUE: + placement_group_strategy = self._replica_config.placement_group_strategy + + if max_replicas_per_node is DEFAULT.VALUE: + max_replicas_per_node = self._replica_config.max_replicas_per_node + + if autoscaling_config is not DEFAULT.VALUE: + new_deployment_config.autoscaling_config = autoscaling_config + + if graceful_shutdown_wait_loop_s is not DEFAULT.VALUE: + new_deployment_config.graceful_shutdown_wait_loop_s = ( + graceful_shutdown_wait_loop_s + ) + + if graceful_shutdown_timeout_s is not DEFAULT.VALUE: + new_deployment_config.graceful_shutdown_timeout_s = ( + graceful_shutdown_timeout_s + ) + + if health_check_period_s is not DEFAULT.VALUE: + new_deployment_config.health_check_period_s = health_check_period_s + + if health_check_timeout_s is not DEFAULT.VALUE: + new_deployment_config.health_check_timeout_s = health_check_timeout_s + + if logging_config is not DEFAULT.VALUE: + if isinstance(logging_config, LoggingConfig): + logging_config = logging_config.dict() + new_deployment_config.logging_config = logging_config + + new_replica_config = ReplicaConfig.create( + func_or_class, + init_args=_init_args, + init_kwargs=_init_kwargs, + ray_actor_options=ray_actor_options, + placement_group_bundles=placement_group_bundles, + placement_group_strategy=placement_group_strategy, + max_replicas_per_node=max_replicas_per_node, + ) + + return Deployment( + name, + new_deployment_config, + new_replica_config, + version=version, + _internal=True, + ) + + def __eq__(self, other): + return all( + [ + self._name == other._name, + self._version == other._version, + self._deployment_config == other._deployment_config, + self._replica_config.init_args == other._replica_config.init_args, + self._replica_config.init_kwargs == other._replica_config.init_kwargs, + self._replica_config.ray_actor_options + == other._replica_config.ray_actor_options, + ] + ) + + def __str__(self): + return f"Deployment(name={self._name})" + + def __repr__(self): + return str(self) + + +def deployment_to_schema(d: Deployment) -> DeploymentSchema: + """Converts a live deployment object to a corresponding structured schema. 
+ + Args: + d: Deployment object to convert + """ + + if d.ray_actor_options is not None: + ray_actor_options_schema = RayActorOptionsSchema.parse_obj(d.ray_actor_options) + else: + ray_actor_options_schema = None + + deployment_options = { + "name": d.name, + "num_replicas": None + if d._deployment_config.autoscaling_config + else d.num_replicas, + "max_ongoing_requests": d.max_ongoing_requests, + "max_queued_requests": d.max_queued_requests, + "user_config": d.user_config, + "autoscaling_config": d._deployment_config.autoscaling_config, + "graceful_shutdown_wait_loop_s": d._deployment_config.graceful_shutdown_wait_loop_s, # noqa: E501 + "graceful_shutdown_timeout_s": d._deployment_config.graceful_shutdown_timeout_s, + "health_check_period_s": d._deployment_config.health_check_period_s, + "health_check_timeout_s": d._deployment_config.health_check_timeout_s, + "ray_actor_options": ray_actor_options_schema, + "placement_group_strategy": d._replica_config.placement_group_strategy, + "placement_group_bundles": d._replica_config.placement_group_bundles, + "max_replicas_per_node": d._replica_config.max_replicas_per_node, + "logging_config": d._deployment_config.logging_config, + } + + # Let non-user-configured options be set to defaults. If the schema + # is converted back to a deployment, this lets Serve continue tracking + # which options were set by the user. Name is a required field in the + # schema, so it should be passed in explicitly. + for option in list(deployment_options.keys()): + if ( + option != "name" + and option not in d._deployment_config.user_configured_option_names + ): + del deployment_options[option] + + # TODO(Sihan) DeploymentConfig num_replicas and auto_config can be set together + # because internally we use these two field for autoscale and deploy. + # We can improve the code after we separate the user faced deployment config and + # internal deployment config. + return DeploymentSchema(**deployment_options) + + +def schema_to_deployment(s: DeploymentSchema) -> Deployment: + """Creates a deployment with parameters specified in schema. + + The returned deployment CANNOT be deployed immediately. It's func_or_class + value is an empty string (""), which is not a valid import path. The + func_or_class value must be overwritten with a valid function or class + before the deployment can be deployed. 
+ """ + + if s.ray_actor_options is DEFAULT.VALUE: + ray_actor_options = None + else: + ray_actor_options = s.ray_actor_options.dict(exclude_unset=True) + + if s.placement_group_bundles is DEFAULT.VALUE: + placement_group_bundles = None + else: + placement_group_bundles = s.placement_group_bundles + + if s.placement_group_strategy is DEFAULT.VALUE: + placement_group_strategy = None + else: + placement_group_strategy = s.placement_group_strategy + + if s.max_replicas_per_node is DEFAULT.VALUE: + max_replicas_per_node = None + else: + max_replicas_per_node = s.max_replicas_per_node + + deployment_config = DeploymentConfig.from_default( + num_replicas=s.num_replicas, + user_config=s.user_config, + max_ongoing_requests=s.max_ongoing_requests, + max_queued_requests=s.max_queued_requests, + autoscaling_config=s.autoscaling_config, + graceful_shutdown_wait_loop_s=s.graceful_shutdown_wait_loop_s, + graceful_shutdown_timeout_s=s.graceful_shutdown_timeout_s, + health_check_period_s=s.health_check_period_s, + health_check_timeout_s=s.health_check_timeout_s, + logging_config=s.logging_config, + ) + deployment_config.user_configured_option_names = ( + s._get_user_configured_option_names() + ) + + replica_config = ReplicaConfig.create( + deployment_def="", + init_args=(), + init_kwargs={}, + ray_actor_options=ray_actor_options, + placement_group_bundles=placement_group_bundles, + placement_group_strategy=placement_group_strategy, + max_replicas_per_node=max_replicas_per_node, + ) + + return Deployment( + name=s.name, + deployment_config=deployment_config, + replica_config=replica_config, + _internal=True, + ) diff --git a/.venv/lib/python3.11/site-packages/ray/serve/exceptions.py b/.venv/lib/python3.11/site-packages/ray/serve/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..8e661e9bc0fa94abb27d641321391e190b15c4a7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/exceptions.py @@ -0,0 +1,56 @@ +from typing import Optional + +from ray.exceptions import TaskCancelledError +from ray.serve._private.common import DeploymentID +from ray.util.annotations import PublicAPI + + +@PublicAPI(stability="stable") +class RayServeException(Exception): + pass + + +@PublicAPI(stability="alpha") +class BackPressureError(RayServeException): + """Raised when max_queued_requests is exceeded on a DeploymentHandle.""" + + def __init__(self, *, num_queued_requests: int, max_queued_requests: int): + self._message = ( + f"Request dropped due to backpressure " + f"(num_queued_requests={num_queued_requests}, " + f"max_queued_requests={max_queued_requests})." + ) + super().__init__(self._message) + + @property + def message(self) -> str: + return self._message + + +@PublicAPI(stability="alpha") +class RequestCancelledError(RayServeException, TaskCancelledError): + """Raise when a Serve request is cancelled.""" + + def __init__(self, request_id: Optional[str] = None): + self._request_id: Optional[str] = request_id + + def __str__(self): + if self._request_id: + return f"Request {self._request_id} was cancelled." + else: + return "Request was cancelled." + + +@PublicAPI(stability="alpha") +class DeploymentUnavailableError(RayServeException): + """Raised when a Serve deployment is unavailable to receive requests. + + Currently this happens because the deployment failed to deploy. 
+ """ + + def __init__(self, deployment_id: DeploymentID): + self._deployment_id = deployment_id + + @property + def message(self) -> str: + return f"{self._deployment_id} is unavailable because it failed to deploy." diff --git a/.venv/lib/python3.11/site-packages/ray/serve/gradio_integrations.py b/.venv/lib/python3.11/site-packages/ray/serve/gradio_integrations.py new file mode 100644 index 0000000000000000000000000000000000000000..dc8960d78c8b41da612a2e35e41310d729e53350 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/gradio_integrations.py @@ -0,0 +1,32 @@ +import logging +from typing import Callable + +from ray import serve +from ray.serve._private.constants import SERVE_LOGGER_NAME +from ray.serve._private.http_util import ASGIAppReplicaWrapper +from ray.util.annotations import PublicAPI + +try: + from gradio import Blocks, routes +except ModuleNotFoundError: + print("Gradio isn't installed. Run `pip install gradio` to install Gradio.") + raise + +logger = logging.getLogger(SERVE_LOGGER_NAME) + + +@PublicAPI(stability="alpha") +class GradioIngress(ASGIAppReplicaWrapper): + """User-facing class that wraps a Gradio App in a Serve Deployment.""" + + def __init__(self, builder: Callable[[], Blocks]): + """Builds and wraps an ASGI app from the provided builder. + + The builder should take no arguments and return a Gradio App (of type Interface + or Blocks). + """ + io: Blocks = builder() + super().__init__(routes.App.create_app(io)) + + +GradioServer = serve.deployment(GradioIngress) diff --git a/.venv/lib/python3.11/site-packages/ray/serve/grpc_util.py b/.venv/lib/python3.11/site-packages/ray/serve/grpc_util.py new file mode 100644 index 0000000000000000000000000000000000000000..b0c132c40f18f16b7a0166c8ff63571ec17e3d68 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/grpc_util.py @@ -0,0 +1,169 @@ +from typing import Any, Dict, List, Optional, Tuple + +import grpc + +from ray.util.annotations import PublicAPI + + +@PublicAPI(stability="beta") +class RayServegRPCContext: + """Context manager to set and get gRPC context. + + This class implements most of the methods from ServicerContext + (see: https://grpc.github.io/grpc/python/grpc.html#grpc.ServicerContext). It's + serializable and can be passed with the request to be used on the deployment. + """ + + def __init__(self, grpc_context: grpc._cython.cygrpc._ServicerContext): + self._auth_context = grpc_context.auth_context() + self._code = grpc_context.code() + self._details = grpc_context.details() + self._invocation_metadata = [ + (key, value) for key, value in grpc_context.invocation_metadata() + ] + self._peer = grpc_context.peer() + self._peer_identities = grpc_context.peer_identities() + self._peer_identity_key = grpc_context.peer_identity_key() + self._trailing_metadata = [ + (key, value) for key, value in grpc_context.trailing_metadata() + ] + self._compression = None + + def auth_context(self) -> Dict[str, Any]: + """Gets the auth context for the call. + + Returns: + A map of strings to an iterable of bytes for each auth property. + """ + return self._auth_context + + def code(self) -> grpc.StatusCode: + """Accesses the value to be used as status code upon RPC completion. + + Returns: + The StatusCode value for the RPC. + """ + return self._code + + def details(self) -> str: + """Accesses the value to be used as detail string upon RPC completion. + + Returns: + The details string of the RPC. 
+ """ + return self._details + + def invocation_metadata(self) -> List[Tuple[str, str]]: + """Accesses the metadata sent by the client. + + Returns: + The invocation :term:`metadata`. + """ + return self._invocation_metadata + + def peer(self) -> str: + """Identifies the peer that invoked the RPC being serviced. + + Returns: + A string identifying the peer that invoked the RPC being serviced. + The string format is determined by gRPC runtime. + """ + return self._peer + + def peer_identities(self) -> Optional[bytes]: + """Gets one or more peer identity(s). + + Equivalent to + servicer_context.auth_context().get(servicer_context.peer_identity_key()) + + Returns: + An iterable of the identities, or None if the call is not + authenticated. Each identity is returned as a raw bytes type. + """ + return self._peer_identities + + def peer_identity_key(self) -> Optional[str]: + """The auth property used to identify the peer. + + For example, "x509_common_name" or "x509_subject_alternative_name" are + used to identify an SSL peer. + + Returns: + The auth property (string) that indicates the + peer identity, or None if the call is not authenticated. + """ + return self._peer_identity_key + + def trailing_metadata(self) -> List[Tuple[str, str]]: + return self._trailing_metadata + + def set_code(self, code: grpc.StatusCode): + """Sets the value to be used as status code upon RPC completion. + + This method need not be called by method implementations if they wish + the gRPC runtime to determine the status code of the RPC. + + Args: + code: A StatusCode object to be sent to the client. + """ + self._code = code + + def set_compression(self, compression: grpc.Compression): + """Set the compression algorithm to be used for the entire call. + + Args: + compression: An element of grpc.compression, e.g. + grpc.compression.Gzip. + """ + self._compression = compression + + def set_details(self, details: str): + """Sets the value to be used as detail string upon RPC completion. + + This method need not be called by method implementations if they have + no details to transmit. + + Args: + details: A UTF-8-encodable string to be sent to the client upon + termination of the RPC. + """ + self._details = details + + def _request_id_metadata(self) -> List[Tuple[str, str]]: + # Request id metadata should be carried over to the trailing metadata and passed + # back to the request client. This function helps pick it out if it exists. + for key, value in self._trailing_metadata: + if key == "request_id": + return [(key, value)] + return [] + + def set_trailing_metadata(self, trailing_metadata: List[Tuple[str, str]]): + """Sets the trailing metadata for the RPC. + + Sets the trailing metadata to be sent upon completion of the RPC. + + If this method is invoked multiple times throughout the lifetime of an + RPC, the value supplied in the final invocation + request id will be the value + sent over the wire. + + This method need not be called by implementations if they have no + metadata to add to what the gRPC runtime will transmit. + + Args: + trailing_metadata: The trailing :term:`metadata`. 
+ """ + self._trailing_metadata = self._request_id_metadata() + trailing_metadata + + def _set_on_grpc_context(self, grpc_context: grpc._cython.cygrpc._ServicerContext): + """Serve's internal method to set attributes on the gRPC context.""" + if self._code: + grpc_context.set_code(self._code) + + if self._compression: + grpc_context.set_compression(self._compression) + + if self._details: + grpc_context.set_details(self._details) + + if self._trailing_metadata: + grpc_context.set_trailing_metadata(self._trailing_metadata) diff --git a/.venv/lib/python3.11/site-packages/ray/serve/handle.py b/.venv/lib/python3.11/site-packages/ray/serve/handle.py new file mode 100644 index 0000000000000000000000000000000000000000..6e8eef371bfebc507868096eb575292b0743567e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/handle.py @@ -0,0 +1,738 @@ +import asyncio +import concurrent.futures +import logging +import time +import warnings +from typing import Any, AsyncIterator, Dict, Iterator, Optional, Tuple, Union + +import ray +from ray import serve +from ray._raylet import ObjectRefGenerator +from ray.serve._private.common import ( + DeploymentHandleSource, + DeploymentID, + RequestMetadata, +) +from ray.serve._private.constants import SERVE_LOGGER_NAME +from ray.serve._private.default_impl import ( + CreateRouterCallable, + create_dynamic_handle_options, + create_init_handle_options, + create_router, +) +from ray.serve._private.handle_options import ( + DynamicHandleOptionsBase, + InitHandleOptionsBase, +) +from ray.serve._private.replica_result import ReplicaResult +from ray.serve._private.router import Router +from ray.serve._private.usage import ServeUsageTag +from ray.serve._private.utils import ( + DEFAULT, + calculate_remaining_timeout, + get_random_string, + inside_ray_client_context, + is_running_in_asyncio_loop, +) +from ray.serve.exceptions import RayServeException, RequestCancelledError +from ray.util import metrics +from ray.util.annotations import DeveloperAPI, PublicAPI + +logger = logging.getLogger(SERVE_LOGGER_NAME) + + +class _DeploymentHandleBase: + def __init__( + self, + deployment_name: str, + app_name: str, + *, + init_options: Optional[InitHandleOptionsBase] = None, + handle_options: Optional[DynamicHandleOptionsBase] = None, + _router: Optional[Router] = None, + _create_router: Optional[CreateRouterCallable] = None, + _request_counter: Optional[metrics.Counter] = None, + _handle_id: Optional[str] = None, + ): + self.deployment_id = DeploymentID(name=deployment_name, app_name=app_name) + self.init_options: Optional[InitHandleOptionsBase] = init_options + self.handle_options: DynamicHandleOptionsBase = ( + handle_options or create_dynamic_handle_options() + ) + + # Handle ID is shared among handles that are returned by + # `handle.options` or `handle.method` + self.handle_id = _handle_id or get_random_string() + self.request_counter = _request_counter or self._create_request_counter( + app_name, deployment_name, self.handle_id + ) + + self._router: Optional[Router] = _router + if _create_router is None: + self._create_router = create_router + else: + self._create_router = _create_router + + @staticmethod + def _gen_handle_tag(app_name: str, deployment_name: str, handle_id: str): + if app_name: + return f"{app_name}#{deployment_name}#{handle_id}" + else: + return f"{deployment_name}#{handle_id}" + + @classmethod + def _create_request_counter( + cls, app_name: str, deployment_name: str, handle_id: str + ): + return metrics.Counter( + "serve_handle_request_counter", + 
description=( + "The number of handle.remote() calls that have been " + "made on this handle." + ), + tag_keys=("handle", "deployment", "route", "application"), + ).set_default_tags( + { + "handle": cls._gen_handle_tag( + app_name, deployment_name, handle_id=handle_id + ), + "deployment": deployment_name, + "application": app_name, + } + ) + + def running_replicas_populated(self) -> bool: + if self._router is None: + return False + + return self._router.running_replicas_populated() + + @property + def deployment_name(self) -> str: + return self.deployment_id.name + + @property + def app_name(self) -> str: + return self.deployment_id.app_name + + @property + def is_initialized(self) -> bool: + return self._router is not None + + def _init(self, **kwargs): + """Initialize this handle with arguments. + + A handle can only be initialized once. A handle is implicitly + initialized when `.options()` or `.remote()` is called. Therefore + to initialize a handle with custom init options, you must do it + before calling `.options()` or `.remote()`. + """ + if self._router is not None: + raise RuntimeError( + "Handle has already been initialized. Note that a handle is implicitly " + "initialized when you call `.options()` or `.remote()`. You either " + "tried to call `._init()` twice or called `._init()` after calling " + "`.options()` or `.remote()`. If you want to modify the init options, " + "please do so before calling `.options()` or `.remote()`. This handle " + f"was initialized with {self.init_options}." + ) + + init_options = create_init_handle_options(**kwargs) + self._router = self._create_router( + handle_id=self.handle_id, + deployment_id=self.deployment_id, + handle_options=init_options, + ) + self.init_options = init_options + + logger.info( + f"Initialized DeploymentHandle {self.handle_id} for {self.deployment_id}.", + extra={"log_to_stderr": False}, + ) + + # Record handle api telemetry when not in the proxy + if ( + self.init_options._source != DeploymentHandleSource.PROXY + and self.__class__ == DeploymentHandle + ): + ServeUsageTag.DEPLOYMENT_HANDLE_API_USED.record("1") + + def _options(self, _prefer_local_routing=DEFAULT.VALUE, **kwargs): + if kwargs.get("stream") is True and inside_ray_client_context(): + raise RuntimeError( + "Streaming DeploymentHandles are not currently supported when " + "connected to a remote Ray cluster using Ray Client." 
+ ) + + new_handle_options = self.handle_options.copy_and_update(**kwargs) + + # TODO(zcin): remove when _prefer_local_routing is removed from options() path + if _prefer_local_routing != DEFAULT.VALUE: + self._init(_prefer_local_routing=_prefer_local_routing) + + if not self.is_initialized: + self._init() + + return DeploymentHandle( + self.deployment_name, + self.app_name, + init_options=self.init_options, + handle_options=new_handle_options, + _router=self._router, + _create_router=self._create_router, + _request_counter=self.request_counter, + _handle_id=self.handle_id, + ) + + def _remote( + self, + args: Tuple[Any], + kwargs: Dict[str, Any], + ) -> Tuple[concurrent.futures.Future, RequestMetadata]: + if not self.is_initialized: + self._init() + + metadata = serve._private.default_impl.get_request_metadata( + self.init_options, self.handle_options + ) + + self.request_counter.inc( + tags={ + "route": metadata.route, + "application": metadata.app_name, + } + ) + + return self._router.assign_request(metadata, *args, **kwargs), metadata + + def __getattr__(self, name): + return self.options(method_name=name) + + def shutdown(self): + if self._router: + shutdown_future = self._router.shutdown() + shutdown_future.result() + + async def shutdown_async(self): + if self._router: + shutdown_future = self._router.shutdown() + await asyncio.wrap_future(shutdown_future) + + def __repr__(self): + return f"{self.__class__.__name__}" f"(deployment='{self.deployment_name}')" + + @classmethod + def _deserialize(cls, kwargs): + """Required for this class's __reduce__ method to be picklable.""" + return cls(**kwargs) + + def __reduce__(self): + serialized_constructor_args = { + "deployment_name": self.deployment_name, + "app_name": self.app_name, + "handle_options": self.handle_options, + } + return self.__class__._deserialize, (serialized_constructor_args,) + + +class _DeploymentResponseBase: + def __init__( + self, + replica_result_future: concurrent.futures.Future[ReplicaResult], + request_metadata: RequestMetadata, + ): + self._cancelled = False + self._replica_result_future = replica_result_future + self._replica_result: Optional[ReplicaResult] = None + self._request_metadata: RequestMetadata = request_metadata + + @property + def request_id(self) -> str: + return self._request_metadata.request_id + + def _fetch_future_result_sync( + self, _timeout_s: Optional[float] = None + ) -> ReplicaResult: + """Synchronously fetch the replica result. + + The result is cached in `self._replica_result`. + """ + + if self._replica_result is None: + try: + self._replica_result = self._replica_result_future.result( + timeout=_timeout_s + ) + except concurrent.futures.TimeoutError: + raise TimeoutError("Timed out resolving to ObjectRef.") from None + except concurrent.futures.CancelledError: + raise RequestCancelledError(self.request_id) from None + + return self._replica_result + + async def _fetch_future_result_async(self) -> ReplicaResult: + """Asynchronously fetch replica result. + + The result is cached in `self._replica_result`.. + """ + + if self._replica_result is None: + # Use `asyncio.wrap_future` so `self._replica_result_future` can be awaited + # safely from any asyncio loop. + try: + self._replica_result = await asyncio.wrap_future( + self._replica_result_future + ) + except asyncio.CancelledError: + raise RequestCancelledError(self.request_id) from None + + return self._replica_result + + def cancel(self): + """Attempt to cancel the `DeploymentHandle` call. + + This is best effort. 
+ + - If the request hasn't been assigned to a replica, the assignment will be + cancelled. + - If the request has been assigned to a replica, `ray.cancel` will be + called on the object ref, attempting to cancel the request and any downstream + requests it makes. + + If the request is successfully cancelled, subsequent operations on the ref will + raise an exception: + + - If the request was cancelled before assignment, they'll raise + `asyncio.CancelledError` (or a `concurrent.futures.CancelledError` for + synchronous methods like `.result()`.). + - If the request was cancelled after assignment, they'll raise + `ray.exceptions.TaskCancelledError`. + """ + if self._cancelled: + return + + self._cancelled = True + if not self._replica_result_future.done(): + self._replica_result_future.cancel() + elif self._replica_result_future.exception() is None: + self._fetch_future_result_sync() + self._replica_result.cancel() + + @DeveloperAPI + def cancelled(self) -> bool: + """Whether or not the request has been cancelled. + + This is `True` if `.cancel()` is called, but the request may actually have run + to completion. + """ + return self._cancelled + + +@PublicAPI(stability="stable") +class DeploymentResponse(_DeploymentResponseBase): + """A future-like object wrapping the result of a unary deployment handle call. + + From inside a deployment, a `DeploymentResponse` can be awaited to retrieve the + output of the call without blocking the asyncio event loop. + + From outside a deployment, `.result()` can be used to retrieve the output in a + blocking manner. + + Example: + + .. code-block:: python + + from ray import serve + from ray.serve.handle import DeploymentHandle + + @serve.deployment + class Downstream: + def say_hi(self, message: str) -> str: + return f"Hello {message}!" + + @serve.deployment + class Caller: + def __init__(self, handle: DeploymentHandle): + self._downstream_handle = handle + + async def __call__(self, message: str) -> str: + # Inside a deployment: `await` the result to enable concurrency. + response = self._downstream_handle.say_hi.remote(message) + return await response + + app = Caller.bind(Downstream.bind()) + handle: DeploymentHandle = serve.run(app) + + # Outside a deployment: call `.result()` to get output. + response = handle.remote("world") + assert response.result() == "Hello world!" + + A `DeploymentResponse` can be passed directly to another `DeploymentHandle` call + without fetching the result to enable composing multiple deployments together. + + Example: + + .. code-block:: python + + from ray import serve + from ray.serve.handle import DeploymentHandle + + @serve.deployment + class Adder: + def add(self, val: int) -> int: + return val + 1 + + @serve.deployment + class Caller: + def __init__(self, handle: DeploymentHandle): + self._adder_handle = handle + + async def __call__(self, start: int) -> int: + return await self._adder_handle.add.remote( + # Pass the response directly to another handle call without awaiting. + self._adder_handle.add.remote(start) + ) + + app = Caller.bind(Adder.bind()) + handle: DeploymentHandle = serve.run(app) + assert handle.remote(0).result() == 2 + """ + + def __await__(self): + """Yields the final result of the deployment handle call.""" + replica_result = yield from self._fetch_future_result_async().__await__() + result = yield from replica_result.get_async().__await__() + return result + + def __reduce__(self): + raise RayServeException( + "`DeploymentResponse` is not serializable. 
If you are passing the " + "`DeploymentResponse` in a nested object (e.g. a list or dictionary) to a " + "downstream deployment handle call, that is no longer supported. Please " + "only pass `DeploymentResponse` objects as top level arguments." + ) + + def result( + self, + *, + timeout_s: Optional[float] = None, + _skip_asyncio_check: bool = False, + ) -> Any: + """Fetch the result of the handle call synchronously. + + This should *not* be used from within a deployment as it runs in an asyncio + event loop. For model composition, `await` the response instead. + + If `timeout_s` is provided and the result is not available before the timeout, + a `TimeoutError` is raised. + """ + + if not _skip_asyncio_check and is_running_in_asyncio_loop(): + raise RuntimeError( + "Sync methods should not be called from within an `asyncio` event " + "loop. Use `await response` instead." + ) + + start_time_s = time.time() + replica_result = self._fetch_future_result_sync(timeout_s) + + remaining_timeout_s = calculate_remaining_timeout( + timeout_s=timeout_s, start_time_s=start_time_s, curr_time_s=time.time() + ) + return replica_result.get(remaining_timeout_s) + + @DeveloperAPI + async def _to_object_ref(self) -> ray.ObjectRef: + """Advanced API to convert the response to a Ray `ObjectRef`. + + This is used to pass the output of a `DeploymentHandle` call to a Ray task or + actor method call. + + This method is `async def` because it will block until the handle call has been + assigned to a replica. If there are many requests in flight and all + replicas' queues are full, this may be a slow operation. + """ + + ServeUsageTag.DEPLOYMENT_HANDLE_TO_OBJECT_REF_API_USED.record("1") + + replica_result = await self._fetch_future_result_async() + return await replica_result.to_object_ref_async() + + @DeveloperAPI + def _to_object_ref_sync( + self, + _timeout_s: Optional[float] = None, + _allow_running_in_asyncio_loop: bool = False, + ) -> ray.ObjectRef: + """Advanced API to convert the response to a Ray `ObjectRef`. + + This is used to pass the output of a `DeploymentHandle` call to a Ray task or + actor method call. + + This method is a *blocking* call because it will block until the handle call has + been assigned to a replica. If there are many requests in flight and all + replicas' queues are full, this may be a slow operation. + + From inside a deployment, `_to_object_ref` should be used instead to avoid + blocking the asyncio event loop. + """ + + ServeUsageTag.DEPLOYMENT_HANDLE_TO_OBJECT_REF_API_USED.record("1") + + if not _allow_running_in_asyncio_loop and is_running_in_asyncio_loop(): + raise RuntimeError( + "Sync methods should not be called from within an `asyncio` event " + "loop. Use `await response._to_object_ref()` instead." + ) + + # First, fetch the result of the future + start_time_s = time.time() + replica_result = self._fetch_future_result_sync(_timeout_s) + + # Then, if necessary, resolve generator to ref + remaining_timeout_s = calculate_remaining_timeout( + timeout_s=_timeout_s, + start_time_s=start_time_s, + curr_time_s=time.time(), + ) + return replica_result.to_object_ref(timeout_s=remaining_timeout_s) + + +@PublicAPI(stability="stable") +class DeploymentResponseGenerator(_DeploymentResponseBase): + """A future-like object wrapping the result of a streaming deployment handle call. + + This is returned when using `handle.options(stream=True)` and calling a generator + deployment method. + + `DeploymentResponseGenerator` is both a synchronous and asynchronous iterator. 
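A sketch of the cancellation flow described above; `Sleeper` is a hypothetical deployment, and which exception surfaces depends on when the cancellation lands (in this version of the file, sync fetches map cancellation to `RequestCancelledError`):

.. code-block:: python

    import asyncio

    from ray import serve
    from ray.serve.exceptions import RequestCancelledError

    @serve.deployment
    class Sleeper:
        async def __call__(self) -> str:
            await asyncio.sleep(60)
            return "done"

    handle = serve.run(Sleeper.bind())
    response = handle.remote()
    response.cancel()  # Best effort; may land before or after assignment.
    try:
        response.result(timeout_s=10)
    except RequestCancelledError:
        # May instead surface ray.exceptions.TaskCancelledError if the
        # request was already running on a replica.
        pass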
+ + When iterating over results from inside a deployment, `async for` should be used to + avoid blocking the asyncio event loop. + + When iterating over results from outside a deployment, use a standard `for` loop. + + Example: + + .. code-block:: python + + from typing import AsyncGenerator, Generator + + from ray import serve + from ray.serve.handle import DeploymentHandle, DeploymentResponseGenerator + + @serve.deployment + class Streamer: + def generate_numbers(self, limit: int) -> Generator[int, None, None]: + for i in range(limit): + yield i + + @serve.deployment + class Caller: + def __init__(self, handle: DeploymentHandle): + # Set `stream=True` on the handle to enable streaming calls. + self._streaming_handle = handle.options(stream=True) + + async def __call__(self, limit: int) -> AsyncGenerator[int, None]: + gen: DeploymentResponseGenerator = ( + self._streaming_handle.generate_numbers.remote(limit) + ) + + # Inside a deployment: use `async for` to enable concurrency. + async for i in gen: + yield i + + app = Caller.bind(Streamer.bind()) + handle: DeploymentHandle = serve.run(app) + + # Outside a deployment: use a standard `for` loop. + gen: DeploymentResponseGenerator = handle.options(stream=True).remote(10) + assert [i for i in gen] == list(range(10)) + + A `DeploymentResponseGenerator` *cannot* currently be passed to another + `DeploymentHandle` call. + """ + + def __await__(self): + raise TypeError( + "`DeploymentResponseGenerator` cannot be awaited directly. Use `async for` " + "or `await response.__anext__()` instead." + ) + + def __aiter__(self) -> AsyncIterator[Any]: + return self + + async def __anext__(self) -> Any: + replica_result = await self._fetch_future_result_async() + return await replica_result.__anext__() + + def __iter__(self) -> Iterator[Any]: + return self + + def __next__(self) -> Any: + if is_running_in_asyncio_loop(): + raise RuntimeError( + "Sync methods should not be called from within an `asyncio` event " + "loop. Use `async for` or `await response.__anext__()` instead." + ) + + replica_result = self._fetch_future_result_sync() + return replica_result.__next__() + + @DeveloperAPI + async def _to_object_ref_gen(self) -> ObjectRefGenerator: + """Advanced API to convert the generator to a Ray `ObjectRefGenerator`. + + This method is `async def` because it will block until the handle call has been + assigned to a replica. If there are many requests in flight and all + replicas' queues are full, this may be a slow operation. + """ + + ServeUsageTag.DEPLOYMENT_HANDLE_TO_OBJECT_REF_API_USED.record("1") + + replica_result = await self._fetch_future_result_async() + return replica_result.to_object_ref_gen() + + @DeveloperAPI + def _to_object_ref_gen_sync( + self, + _timeout_s: Optional[float] = None, + _allow_running_in_asyncio_loop: bool = False, + ) -> ObjectRefGenerator: + """Advanced API to convert the generator to a Ray `ObjectRefGenerator`. + + This method is a *blocking* call because it will block until the handle call has + been assigned to a replica. If there are many requests in flight and all + replicas' queues are full, this may be a slow operation. + + From inside a deployment, `_to_object_ref_gen` should be used instead to avoid + blocking the asyncio event loop. + """ + + ServeUsageTag.DEPLOYMENT_HANDLE_TO_OBJECT_REF_API_USED.record("1") + + if not _allow_running_in_asyncio_loop and is_running_in_asyncio_loop(): + raise RuntimeError( + "Sync methods should not be called from within an `asyncio` event " + "loop. Use `await response._to_object_ref_gen()` instead."
+ ) + + replica_result = self._fetch_future_result_sync(_timeout_s) + return replica_result.to_object_ref_gen() + + +@PublicAPI(stability="stable") +class DeploymentHandle(_DeploymentHandleBase): + """A handle used to make requests to a deployment at runtime. + + This is primarily used to compose multiple deployments within a single application. + It can also be used to make calls to the ingress deployment of an application (e.g., + for programmatic testing). + + Example: + + .. code-block:: python + + import ray + from ray import serve + from ray.serve.handle import DeploymentHandle, DeploymentResponse + + @serve.deployment + class Downstream: + def say_hi(self, message: str): + return f"Hello {message}!" + + @serve.deployment + class Ingress: + def __init__(self, handle: DeploymentHandle): + self._downstream_handle = handle + + async def __call__(self, name: str) -> str: + response = self._downstream_handle.say_hi.remote(name) + return await response + + app = Ingress.bind(Downstream.bind()) + handle: DeploymentHandle = serve.run(app) + response = handle.remote("world") + assert response.result() == "Hello world!" + """ + + def options( + self, + *, + method_name: Union[str, DEFAULT] = DEFAULT.VALUE, + multiplexed_model_id: Union[str, DEFAULT] = DEFAULT.VALUE, + stream: Union[bool, DEFAULT] = DEFAULT.VALUE, + use_new_handle_api: Union[bool, DEFAULT] = DEFAULT.VALUE, + _prefer_local_routing: Union[bool, DEFAULT] = DEFAULT.VALUE, + ) -> "DeploymentHandle": + """Set options for this handle and return an updated copy of it. + + Example: + + .. code-block:: python + + response: DeploymentResponse = handle.options( + method_name="other_method", + multiplexed_model_id="model:v1", + ).remote() + """ + if use_new_handle_api is not DEFAULT.VALUE: + warnings.warn( + "Setting `use_new_handle_api` no longer has any effect. " + "This argument will be removed in a future version." + ) + + if _prefer_local_routing is not DEFAULT.VALUE: + warnings.warn( + "Modifying `_prefer_local_routing` with `options()` is " + "deprecated. Please use `init()` instead." + ) + + return self._options( + method_name=method_name, + multiplexed_model_id=multiplexed_model_id, + stream=stream, + _prefer_local_routing=_prefer_local_routing, + ) + + def remote( + self, *args, **kwargs + ) -> Union[DeploymentResponse, DeploymentResponseGenerator]: + """Issue a remote call to a method of the deployment. + + By default, the result is a `DeploymentResponse` that can be awaited to fetch + the result of the call or passed to another `.remote()` call to compose multiple + deployments. + + If `handle.options(stream=True)` is set and a generator method is called, this + returns a `DeploymentResponseGenerator` instead. + + Example: + + .. code-block:: python + + # Fetch the result directly. + response = handle.remote() + result = await response + + # Pass the result to another handle call. + composed_response = handle2.remote(handle1.remote()) + composed_result = await composed_response + + Args: + *args: Positional arguments to be serialized and passed to the + remote method call. + **kwargs: Keyword arguments to be serialized and passed to the + remote method call.
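A sketch of the `_to_object_ref_sync` developer API described earlier, assuming a local cluster; `Source` and `double` are hypothetical names used only for illustration:

.. code-block:: python

    import ray
    from ray import serve

    @ray.remote
    def double(x: int) -> int:
        return 2 * x

    @serve.deployment
    class Source:
        def __call__(self) -> int:
            return 21

    handle = serve.run(Source.bind())
    response = handle.remote()

    # Underscored developer API: blocks until the request is assigned to a
    # replica, then returns a Ray ObjectRef usable by tasks and actors.
    ref = response._to_object_ref_sync()
    assert ray.get(double.remote(ref)) == 42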
+ """ + + future, request_metadata = self._remote(args, kwargs) + if self.handle_options.stream: + response_cls = DeploymentResponseGenerator + else: + response_cls = DeploymentResponse + + return response_cls(future, request_metadata) diff --git a/.venv/lib/python3.11/site-packages/ray/serve/metrics.py b/.venv/lib/python3.11/site-packages/ray/serve/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..bce6e49440047a77b30199e4938484cdc938f3df --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/metrics.py @@ -0,0 +1,249 @@ +from typing import Dict, List, Optional, Tuple, Union + +import ray +from ray.serve import context +from ray.util import metrics +from ray.util.annotations import PublicAPI + +DEPLOYMENT_TAG = "deployment" +REPLICA_TAG = "replica" +APPLICATION_TAG = "application" +ROUTE_TAG = "route" + + +def _add_serve_metric_tags(tag_keys: Optional[Tuple[str]] = None) -> Tuple[str]: + """Add serve context tags to the tag_keys""" + if tag_keys is None: + tag_keys = tuple() + + # If the context doesn't exist, no serve tag is added. + if context._get_internal_replica_context() is None: + return tag_keys + # Check no collision with customer tag + if DEPLOYMENT_TAG in tag_keys: + raise ValueError(f"'{DEPLOYMENT_TAG}' tag is reserved for Ray Serve metrics") + if REPLICA_TAG in tag_keys: + raise ValueError(f"'{REPLICA_TAG}' tag is reserved for Ray Serve metrics") + if APPLICATION_TAG in tag_keys: + raise ValueError(f"'{APPLICATION_TAG}' tag is reserved for Ray Serve metrics") + + # Get serve tag inserted: + ray_serve_tags = (DEPLOYMENT_TAG, REPLICA_TAG) + if context._get_internal_replica_context().app_name: + ray_serve_tags += (APPLICATION_TAG,) + if tag_keys: + tag_keys = ray_serve_tags + tag_keys + else: + tag_keys = ray_serve_tags + return tag_keys + + +def _add_serve_metric_default_tags(default_tags: Dict[str, str]): + """Add serve context tags and values to the default_tags""" + if context._get_internal_replica_context() is None: + return default_tags + if DEPLOYMENT_TAG in default_tags: + raise ValueError(f"'{DEPLOYMENT_TAG}' tag is reserved for Ray Serve metrics") + if REPLICA_TAG in default_tags: + raise ValueError(f"'{REPLICA_TAG}' tag is reserved for Ray Serve metrics") + if APPLICATION_TAG in default_tags: + raise ValueError(f"'{APPLICATION_TAG}' tag is reserved for Ray Serve metrics") + replica_context = context._get_internal_replica_context() + # TODO(zcin): use replica_context.deployment for deployment tag + default_tags[DEPLOYMENT_TAG] = replica_context.deployment + default_tags[REPLICA_TAG] = replica_context.replica_tag + if replica_context.app_name: + default_tags[APPLICATION_TAG] = replica_context.app_name + return default_tags + + +def _add_serve_context_tag_values(tag_keys: Tuple, tags: Dict[str, str]): + """Add serve context tag values to the metric tags""" + + _request_context = ray.serve.context._get_serve_request_context() + if ROUTE_TAG in tag_keys and ROUTE_TAG not in tags: + tags[ROUTE_TAG] = _request_context.route + + +@PublicAPI(stability="beta") +class Counter(metrics.Counter): + """A serve cumulative metric that is monotonically increasing. + + This corresponds to Prometheus' counter metric: + https://prometheus.io/docs/concepts/metric_types/#counter + + Serve-related tags ("deployment", "replica", "application", "route") + are added automatically if not provided. + + .. 
code-block:: python + + @serve.deployment + class MyDeployment: + def __init__(self): + self.num_requests = 0 + self.my_counter = metrics.Counter( + "my_counter", + description=("The number of odd-numbered requests " + "to this deployment."), + tag_keys=("model",), + ) + self.my_counter.set_default_tags({"model": "123"}) + + def __call__(self): + self.num_requests += 1 + if self.num_requests % 2 == 1: + self.my_counter.inc() + + .. note:: + + Before Ray 2.10, this exports a Prometheus gauge metric instead of + a counter metric. + Starting in Ray 2.10, this exports both the proper counter metric + (with a suffix "_total") and gauge metric (for compatibility). + The gauge metric will be removed in a future Ray release and you can set + `RAY_EXPORT_COUNTER_AS_GAUGE=0` to disable exporting it in the meantime. + + Args: + name: Name of the metric. + description: Description of the metric. + tag_keys: Tag keys of the metric. + """ + + def __init__( + self, name: str, description: str = "", tag_keys: Optional[Tuple[str]] = None + ): + if tag_keys and not isinstance(tag_keys, tuple): + raise TypeError( + "tag_keys should be a tuple type, got: " f"{type(tag_keys)}" + ) + tag_keys = _add_serve_metric_tags(tag_keys) + super().__init__(name, description, tag_keys) + self.set_default_tags({}) + + def set_default_tags(self, default_tags: Dict[str, str]): + super().set_default_tags(_add_serve_metric_default_tags(default_tags)) + + def inc(self, value: Union[int, float] = 1.0, tags: Dict[str, str] = None): + """Increment the counter by the given value, adding serve context + tag values to the tags. + """ + _add_serve_context_tag_values(self._tag_keys, tags) + super().inc(value, tags) + + +@PublicAPI(stability="beta") +class Gauge(metrics.Gauge): + """Gauges keep the last recorded value and drop everything before. + + This corresponds to Prometheus' gauge metric: + https://prometheus.io/docs/concepts/metric_types/#gauge + + Serve-related tags ("deployment", "replica", "application", "route") + are added automatically if not provided. + + .. code-block:: python + + @serve.deployment + class MyDeployment: + def __init__(self): + self.my_gauge = metrics.Gauge( + "my_gauge", + description=("The current memory usage."), + tag_keys=("model",), + ) + self.my_gauge.set_default_tags({"model": "123"}) + + def __call__(self): + process = psutil.Process() + self.my_gauge.set(process.memory_info().rss) + + Args: + name: Name of the metric. + description: Description of the metric. + tag_keys: Tag keys of the metric. + """ + + def __init__( + self, name: str, description: str = "", tag_keys: Optional[Tuple[str]] = None + ): + if tag_keys and not isinstance(tag_keys, tuple): + raise TypeError( + "tag_keys should be a tuple type, got: " f"{type(tag_keys)}" + ) + tag_keys = _add_serve_metric_tags(tag_keys) + super().__init__(name, description, tag_keys) + self.set_default_tags({}) + + def set_default_tags(self, default_tags: Dict[str, str]): + super().set_default_tags(_add_serve_metric_default_tags(default_tags)) + + def set(self, value: Union[int, float], tags: Dict[str, str] = None): + """Set the gauge to the given value, adding serve context + tag values to the tags. + """ + _add_serve_context_tag_values(self._tag_keys, tags) + super().set(value, tags) + + +@PublicAPI(stability="beta") +class Histogram(metrics.Histogram): + """Tracks the size and number of events in buckets. + + Histograms allow you to calculate aggregate quantiles + such as 25, 50, 95, 99 percentile latency for an RPC.
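A sketch of the reserved-tag check implemented by `_add_serve_metric_tags` above; inside a replica, colliding with an automatic tag raises at construction time (outside a replica the check is skipped because there is no replica context). The deployment name here is hypothetical:

.. code-block:: python

    from ray import serve
    from ray.serve import metrics

    @serve.deployment
    class Tagged:
        def __init__(self):
            try:
                # "deployment" is reserved; Serve adds it automatically.
                metrics.Counter("bad_counter", tag_keys=("deployment",))
            except ValueError:
                # Use a non-reserved tag key instead.
                self.counter = metrics.Counter(
                    "good_counter", tag_keys=("model",)
                )
                self.counter.set_default_tags({"model": "123"})

        def __call__(self) -> str:
            self.counter.inc()
            return "ok"

    serve.run(Tagged.bind())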
+ + This corresponds to Prometheus' histogram metric: + https://prometheus.io/docs/concepts/metric_types/#histogram + + Serve-related tags ("deployment", "replica", "application", "route") + are added automatically if not provided. + + .. code-block:: python + + @serve.deployment + class MyDeployment: + def __init__(self): + self.my_histogram = Histogram( + "my_histogram", + description=("Histogram of the __call__ method running time."), + boundaries=[1,2,4,8,16,32,64], + tag_keys=("model",), + ) + self.my_histogram.set_default_tags({"model": "123"}) + + def __call__(self): + start = time.time() + self.my_histogram.observe(time.time() - start) + + Args: + name: Name of the metric. + description: Description of the metric. + boundaries: Boundaries of histogram buckets. + tag_keys: Tag keys of the metric. + """ + + def __init__( + self, + name: str, + description: str = "", + boundaries: List[float] = None, + tag_keys: Optional[Tuple[str]] = None, + ): + if tag_keys and not isinstance(tag_keys, tuple): + raise TypeError( + "tag_keys should be a tuple type, got: " f"{type(tag_keys)}" + ) + tag_keys = _add_serve_metric_tags(tag_keys) + super().__init__(name, description, boundaries, tag_keys) + self.set_default_tags({}) + + def set_default_tags(self, default_tags: Dict[str, str]): + super().set_default_tags(_add_serve_metric_default_tags(default_tags)) + + def observe(self, value: Union[int, float], tags: Dict[str, str] = None): + """Observe the given value, add serve context + tag values to the tags + """ + _add_serve_context_tag_values(self._tag_keys, tags) + super().observe(value, tags) diff --git a/.venv/lib/python3.11/site-packages/ray/serve/multiplex.py b/.venv/lib/python3.11/site-packages/ray/serve/multiplex.py new file mode 100644 index 0000000000000000000000000000000000000000..410f627a088792088db8a4724f370d167ed3e9fb --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/multiplex.py @@ -0,0 +1,258 @@ +import asyncio +import inspect +import logging +import time +from collections import OrderedDict +from typing import Any, Callable, List, Set + +from ray.serve import metrics +from ray.serve._private.common import MultiplexedReplicaInfo +from ray.serve._private.constants import ( + DEFAULT_LATENCY_BUCKET_MS, + PUSH_MULTIPLEXED_MODEL_IDS_INTERVAL_S, + SERVE_LOGGER_NAME, +) +from ray.serve._private.metrics_utils import MetricsPusher +from ray.serve._private.usage import ServeUsageTag +from ray.serve.context import _get_global_client, _get_internal_replica_context + +logger = logging.getLogger(SERVE_LOGGER_NAME) + + +class _ModelMultiplexWrapper: + """A wrapper class that wraps the model load function and + provides the LRU caching functionality. + + The model multiplexer is a wrapper class that wraps the model load function + and provides the LRU caching functionality, and the model load function should + be a coroutine function that takes the model ID as the first argument and + returns the user-constructed model object. + The model multiplexer will also ensure that the number of models on the current + replica does not exceed the specified limit. + The model will be unloaded in the LRU order, the model multiplexer will call the + model's __del__ attribute if it exists to clean up the model resources eagerly. + + """ + + _PUSH_MULTIPLEXED_MODEL_IDS_TASK_NAME = "push_multiplexed_model_ids" + + def __init__( + self, + model_load_func: Callable[[str], Any], + self_arg: Any, + max_num_models_per_replica: int, + ): + """Initialize the model multiplexer. 
+ Args: + model_load_func: the async model load function. + self_arg: the `self` argument when `model_load_func` is a class method. + max_num_models_per_replica: the maximum number of models to be loaded on the + current replica. If it is -1, there is no limit for the number of models + per replica. + """ + + ServeUsageTag.MULTIPLEXED_API_USED.record("1") + + self.models = OrderedDict() + self._func: Callable = model_load_func + self.self_arg: Any = self_arg + self.max_num_models_per_replica: int = max_num_models_per_replica + + self.model_load_latency_ms = metrics.Histogram( + "serve_multiplexed_model_load_latency_ms", + description="The time it takes to load a model.", + boundaries=DEFAULT_LATENCY_BUCKET_MS, + ) + self.model_unload_latency_ms = metrics.Histogram( + "serve_multiplexed_model_unload_latency_ms", + description="The time it takes to unload a model.", + boundaries=DEFAULT_LATENCY_BUCKET_MS, + ) + self.num_models_gauge = metrics.Gauge( + "serve_num_multiplexed_models", + description="The number of models loaded on the current replica.", + ) + + self.registered_model_gauge = metrics.Gauge( + "serve_registered_multiplexed_model_id", + description="The model id registered on the current replica.", + tag_keys=("model_id",), + ) + self.get_model_requests_counter = metrics.Counter( + "serve_multiplexed_get_model_requests_counter", + description="The counter for get model requests on the current replica.", + ) + self.models_unload_counter = metrics.Counter( + "serve_multiplexed_models_unload_counter", + description="The counter for unloaded models on the current replica.", + ) + self.models_load_counter = metrics.Counter( + "serve_multiplexed_models_load_counter", + description="The counter for loaded models on the current replica.", + ) + + context = _get_internal_replica_context() + if context is None: + raise RuntimeError( + "`@serve.multiplexed` can only be used within a deployment " + "(failed to retrieve Serve replica context)." + ) + + self._app_name: str = context.app_name + self._deployment_name: str = context.deployment + self._replica_id: str = context.replica_id + + # Whether to push the multiplexed replica info to the controller. + self._push_multiplexed_replica_info: bool = False + + # Model cache lock to ensure that only one model is loading/unloading at a time. + self._model_cache_lock = asyncio.Lock() + # The set of model IDs that are being loaded. This is used to push model + # ID info to the controller early. Model IDs are added on a cache miss and + # removed when the model either loads successfully or fails to load. + self._model_load_tasks: Set[str] = set() + + self.metrics_pusher = MetricsPusher() + self.metrics_pusher.register_or_update_task( + self._PUSH_MULTIPLEXED_MODEL_IDS_TASK_NAME, + self._push_model_ids_info, + PUSH_MULTIPLEXED_MODEL_IDS_INTERVAL_S, + ) + self.metrics_pusher.start() + + def _get_loading_and_loaded_model_ids(self) -> List[str]: + """Get the model IDs of the loaded and currently loading models on this replica. + This information is pushed to the controller early so that + requests can be routed to the replica.
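This wrapper backs the public `@serve.multiplexed` API. A minimal sketch of that API; the `MyModel` class and its loading logic are hypothetical:

.. code-block:: python

    from ray import serve
    from starlette.requests import Request

    class MyModel:  # Hypothetical model wrapper.
        def __init__(self, model_id: str):
            self.model_id = model_id

    @serve.deployment
    class ModelServer:
        @serve.multiplexed(max_num_models_per_replica=3)
        async def get_model(self, model_id: str) -> MyModel:
            # Load and return the model; it is cached per the LRU policy above.
            return MyModel(model_id)

        async def __call__(self, request: Request) -> str:
            # Routed based on the "serve_multiplexed_model_id" request header.
            model_id = serve.get_multiplexed_model_id()
            model = await self.get_model(model_id)
            return model.model_id

    serve.run(ModelServer.bind())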
+ """ + models_list = set(self.models.keys()) + models_list.update(self._model_load_tasks) + return list(models_list) + + def _push_model_ids_info(self): + """Push the multiplexed replica info to the controller.""" + try: + self.num_models_gauge.set(len(self.models)) + + for model_id in self.models: + self.registered_model_gauge.set(1, tags={"model_id": model_id}) + + if self._push_multiplexed_replica_info: + _get_global_client().record_multiplexed_replica_info( + MultiplexedReplicaInfo( + self._replica_id, + self._get_loading_and_loaded_model_ids(), + ) + ) + self._push_multiplexed_replica_info = False + except Exception as e: + logger.warning( + "Failed to push the multiplexed replica info " + f"to the controller. Error: {e}" + ) + + async def shutdown(self): + """Unload all the models when the model multiplexer is deleted.""" + while len(self.models) > 0: + try: + await self.unload_model_lru() + except Exception as e: + logger.exception( + f"Failed to unload model. Error: {e}", + ) + + async def load_model(self, model_id: str) -> Any: + """Load the model if it is not loaded yet, and return + the user-constructed model object. + + Args: + model_id: the model ID. + + Returns: + The user-constructed model object. + """ + + if type(model_id) is not str: + raise TypeError("The model ID must be a string.") + + if not model_id: + raise ValueError("The model ID cannot be empty.") + + self.get_model_requests_counter.inc() + + if model_id in self.models: + # Move the model to the end of the OrderedDict to ensure LRU caching. + model = self.models.pop(model_id) + self.models[model_id] = model + return self.models[model_id] + else: + # Set the flag to push the multiplexed replica info to the controller + # before loading the model. This is to make sure we can push the model + # id info to the controller/router early, so that requests can be routed to + # the replica. + self._push_multiplexed_replica_info = True + self._model_load_tasks.add(model_id) + async with self._model_cache_lock: + # Check if the model has been loaded by another request. + if model_id in self.models: + return self.models[model_id] + try: + # If the number of models per replica is specified, check + # if the number of models on the current replica has + # reached the limit. + if ( + self.max_num_models_per_replica > 0 + and len(self.models) >= self.max_num_models_per_replica + ): + # Unload the least recently used model. + await self.unload_model_lru() + self._push_multiplexed_replica_info = True + + # Load the model. + logger.info(f"Loading model '{model_id}'.") + self.models_load_counter.inc() + load_start_time = time.time() + if self.self_arg is None: + self.models[model_id] = await self._func(model_id) + else: + self.models[model_id] = await self._func( + self.self_arg, model_id + ) + load_latency_ms = (time.time() - load_start_time) * 1000.0 + logger.info( + f"Successfully loaded model '{model_id}' in " + f"{load_latency_ms:.1f}ms." + ) + self._model_load_tasks.discard(model_id) + self.model_load_latency_ms.observe(load_latency_ms) + return self.models[model_id] + except Exception as e: + logger.error( + f"Failed to load model '{model_id}'. Error: {e}", + ) + self._model_load_tasks.discard(model_id) + raise e + + async def unload_model_lru(self) -> None: + """Unload the least recently used model.""" + + self.models_unload_counter.inc() + unload_start_time = time.time() + model_id, model = self.models.popitem(last=False) + logger.info(f"Unloading model '{model_id}'.") + + # If the model has __del__ attribute, call it. 
# This is to clean up the model resources eagerly. + if hasattr(model, "__del__"): + if not inspect.iscoroutinefunction(model.__del__): + await asyncio.get_running_loop().run_in_executor(None, model.__del__) + else: + await model.__del__() + model.__del__ = lambda _: None + unload_latency_ms = (time.time() - unload_start_time) * 1000.0 + self.model_unload_latency_ms.observe(unload_latency_ms) + logger.info( + f"Successfully unloaded model '{model_id}' in {unload_latency_ms:.1f}ms." + ) + self.registered_model_gauge.set(0, tags={"model_id": model_id}) diff --git a/.venv/lib/python3.11/site-packages/ray/serve/schema.py b/.venv/lib/python3.11/site-packages/ray/serve/schema.py new file mode 100644 index 0000000000000000000000000000000000000000..ed696219b0c33fee067bd4c216b091be815521b6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/schema.py @@ -0,0 +1,1142 @@ +import logging +from collections import Counter +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, Dict, List, Optional, Set, Union +from zlib import crc32 + +from ray._private.pydantic_compat import ( + BaseModel, + Extra, + Field, + NonNegativeInt, + PositiveInt, + StrictInt, + root_validator, + validator, +) +from ray._private.runtime_env.packaging import parse_uri +from ray.serve._private.common import ( + DeploymentStatus, + DeploymentStatusTrigger, + ReplicaState, + ServeDeployMode, +) +from ray.serve._private.constants import ( + DEFAULT_GRPC_PORT, + DEFAULT_MAX_ONGOING_REQUESTS, + DEFAULT_UVICORN_KEEP_ALIVE_TIMEOUT_S, + RAY_SERVE_LOG_ENCODING, + SERVE_DEFAULT_APP_NAME, +) +from ray.serve._private.deployment_info import DeploymentInfo +from ray.serve._private.utils import DEFAULT +from ray.serve.config import ProxyLocation +from ray.util.annotations import PublicAPI + +# Shared amongst multiple schemas. +TARGET_CAPACITY_FIELD = Field( + default=None, + description=( + "[EXPERIMENTAL]: the target capacity percentage for all replicas across the " + "cluster. The `num_replicas`, `min_replicas`, `max_replicas`, and " + "`initial_replicas` for each deployment will be scaled by this percentage." + ), + ge=0, + le=100, +) + + +def _route_prefix_format(cls, v): + """ + The route_prefix + 1. must start with a / character + 2. must not end with a / character (unless the entire prefix is just /) + 3. cannot contain wildcards (must not have "{" or "}") + """ + + if v is None: + return v + + if not v.startswith("/"): + raise ValueError( + f'Got "{v}" for route_prefix. Route prefix must start with "/".' + ) + if len(v) > 1 and v.endswith("/"): + raise ValueError( + f'Got "{v}" for route_prefix. Route prefix ' + 'cannot end with "/" unless the ' + 'entire prefix is just "/".' + ) + if "{" in v or "}" in v: + raise ValueError( + f'Got "{v}" for route_prefix. Route prefix ' + "cannot contain wildcards, so it cannot " + 'contain "{" or "}".' + ) + + return v + + +@PublicAPI(stability="alpha") +class EncodingType(str, Enum): + """Encoding type for the serve logs.""" + + TEXT = "TEXT" + JSON = "JSON" + + +@PublicAPI(stability="alpha") +class LoggingConfig(BaseModel): + """Logging config schema for configuring Serve component logs. + + Example: + + .. code-block:: python + + from ray import serve + from ray.serve.schema import LoggingConfig + # Set log level for the deployment. + @serve.deployment(logging_config=LoggingConfig(log_level="DEBUG")) + class MyDeployment: + def __call__(self) -> str: + return "Hello world!" + # Set log directory for the deployment.
+ @serve.deployment(logging_config=LoggingConfig(logs_dir="/my_dir")) + class MyDeployment: + def __call__(self) -> str: + return "Hello world!" + """ + + class Config: + extra = Extra.forbid + + encoding: Union[str, EncodingType] = Field( + default_factory=lambda: RAY_SERVE_LOG_ENCODING, + description=( + "Encoding type for the serve logs. Defaults to 'TEXT'. The default can be " + "overwritten using the `RAY_SERVE_LOG_ENCODING` environment variable. " + "'JSON' is also supported for structured logging." + ), + ) + log_level: Union[int, str] = Field( + default="INFO", + description=( + "Log level for the serve logs. Defaults to INFO. You can set it to " + "'DEBUG' to get more detailed debug logs." + ), + ) + logs_dir: Union[str, None] = Field( + default=None, + description=( + "Directory to store the logs. Defaults to None, which means " + "logs will be stored in the default directory " + "('/tmp/ray/session_latest/logs/serve/...')." + ), + ) + enable_access_log: bool = Field( + default=True, + description=( + "Whether to enable access logs for each request. Defaults to True." + ), + ) + + @validator("encoding") + def valid_encoding_format(cls, v): + if v not in list(EncodingType): + raise ValueError( + f"Got '{v}' for encoding. Encoding must be one " + f"of {set(EncodingType)}." + ) + + return v + + @validator("log_level") + def valid_log_level(cls, v): + if isinstance(v, int): + if v not in logging._levelToName: + raise ValueError( + f'Got "{v}" for log_level. log_level must be one of ' + f"{list(logging._levelToName.keys())}." + ) + return logging._levelToName[v] + + if v not in logging._nameToLevel: + raise ValueError( + f'Got "{v}" for log_level. log_level must be one of ' + f"{list(logging._nameToLevel.keys())}." + ) + return v + + def _compute_hash(self) -> int: + return crc32( + ( + str(self.encoding) + + str(self.log_level) + + str(self.logs_dir) + + str(self.enable_access_log) + ).encode("utf-8") + ) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, LoggingConfig): + return False + return self._compute_hash() == other._compute_hash() + + +@PublicAPI(stability="stable") +class RayActorOptionsSchema(BaseModel): + """Options with which to start a replica actor.""" + + runtime_env: dict = Field( + default={}, + description=( + "This deployment's runtime_env. working_dir and " + "py_modules may contain only remote URIs." + ), + ) + num_cpus: float = Field( + default=None, + description=( + "The number of CPUs required by the deployment's " + "application per replica. This is the same as a ray " + "actor's num_cpus. Uses a default if null." + ), + ge=0, + ) + num_gpus: float = Field( + default=None, + description=( + "The number of GPUs required by the deployment's " + "application per replica. This is the same as a ray " + "actor's num_gpus. Uses a default if null." + ), + ge=0, + ) + memory: float = Field( + default=None, + description=( + "Restrict the heap memory usage of each replica. Uses a default if null." + ), + ge=0, + ) + resources: Dict = Field( + default={}, + description=("The custom resources required by each replica."), + ) + accelerator_type: str = Field( + default=None, + description=( + "Forces replicas to run on nodes with the specified accelerator type. " + "See :ref:`accelerator types `."
+ ), + ) + + @validator("runtime_env") + def runtime_env_contains_remote_uris(cls, v): + # Ensure that all uris in py_modules and working_dir are remote + + if v is None: + return + + uris = v.get("py_modules", []) + if "working_dir" in v and v["working_dir"] not in uris: + uris.append(v["working_dir"]) + + for uri in uris: + if uri is not None: + try: + parse_uri(uri) + except ValueError as e: + raise ValueError( + "runtime_envs in the Serve config support only " + "remote URIs in working_dir and py_modules. Got " + f"error when parsing URI: {e}" + ) + + return v + + +@PublicAPI(stability="stable") +class DeploymentSchema(BaseModel, allow_population_by_field_name=True): + """ + Specifies options for one deployment within a Serve application. For each deployment + this can optionally be included in `ServeApplicationSchema` to override deployment + options specified in code. + """ + + name: str = Field( + ..., description=("Globally-unique name identifying this deployment.") + ) + num_replicas: Optional[Union[PositiveInt, str]] = Field( + default=DEFAULT.VALUE, + description=( + "The number of processes that handle requests to this " + "deployment. Uses a default if null. Can also be set to " + "`auto` for a default autoscaling configuration " + "(experimental)." + ), + ) + max_ongoing_requests: int = Field( + default=DEFAULT.VALUE, + description=( + "Maximum number of requests that are sent in parallel " + "to each replica of this deployment. The limit is enforced across all " + "callers (HTTP requests or DeploymentHandles). Defaults to " + f"{DEFAULT_MAX_ONGOING_REQUESTS}." + ), + gt=0, + ) + max_queued_requests: StrictInt = Field( + default=DEFAULT.VALUE, + description=( + "[DEPRECATED] The max number of requests that will be executed at once in " + f"each replica. Defaults to {DEFAULT_MAX_ONGOING_REQUESTS}." + ), + ) + user_config: Optional[Dict] = Field( + default=DEFAULT.VALUE, + description=( + "Config to pass into this deployment's " + "reconfigure method. This can be updated dynamically " + "without restarting replicas" + ), + ) + autoscaling_config: Optional[Dict] = Field( + default=DEFAULT.VALUE, + description=( + "Config specifying autoscaling " + "parameters for the deployment's number of replicas. " + "If null, the deployment won't autoscale its number of " + "replicas; the number of replicas will be fixed at " + "num_replicas." + ), + ) + graceful_shutdown_wait_loop_s: float = Field( + default=DEFAULT.VALUE, + description=( + "Duration that deployment replicas will wait until there " + "is no more work to be done before shutting down. Uses a " + "default if null." + ), + ge=0, + ) + graceful_shutdown_timeout_s: float = Field( + default=DEFAULT.VALUE, + description=( + "Serve controller waits for this duration before " + "forcefully killing the replica for shutdown. Uses a " + "default if null." + ), + ge=0, + ) + health_check_period_s: float = Field( + default=DEFAULT.VALUE, + description=( + "Frequency at which the controller will health check " + "replicas. Uses a default if null." + ), + gt=0, + ) + health_check_timeout_s: float = Field( + default=DEFAULT.VALUE, + description=( + "Timeout that the controller will wait for a response " + "from the replica's health check before marking it " + "unhealthy. Uses a default if null." + ), + gt=0, + ) + ray_actor_options: RayActorOptionsSchema = Field( + default=DEFAULT.VALUE, description="Options set for each replica actor." 
+ ) + + placement_group_bundles: List[Dict[str, float]] = Field( + default=DEFAULT.VALUE, + description=( + "Define a set of placement group bundles to be " + "scheduled *for each replica* of this deployment. The replica actor will " + "be scheduled in the first bundle provided, so the resources specified in " + "`ray_actor_options` must be a subset of the first bundle's resources. All " + "actors and tasks created by the replica actor will be scheduled in the " + "placement group by default (`placement_group_capture_child_tasks` is set " + "to True)." + ), + ) + + placement_group_strategy: str = Field( + default=DEFAULT.VALUE, + description=( + "Strategy to use for the replica placement group " + "specified via `placement_group_bundles`. Defaults to `PACK`." + ), + ) + + max_replicas_per_node: int = Field( + default=DEFAULT.VALUE, + description=( + "The max number of replicas of this deployment that can run on a single " + "Valid values are None (default, no limit) or an integer in the range of " + "[1, 100]. " + ), + ) + logging_config: LoggingConfig = Field( + default=DEFAULT.VALUE, + description="Logging config for configuring serve deployment logs.", + ) + + @root_validator + def validate_num_replicas_and_autoscaling_config(cls, values): + num_replicas = values.get("num_replicas", None) + autoscaling_config = values.get("autoscaling_config", None) + + # Cannot have `num_replicas` be an int and a non-null + # autoscaling config + if isinstance(num_replicas, int): + if autoscaling_config not in [None, DEFAULT.VALUE]: + raise ValueError( + "Manually setting num_replicas is not allowed " + "when autoscaling_config is provided." + ) + # A null `num_replicas` or `num_replicas="auto"` can be paired + # with a non-null autoscaling_config + elif num_replicas not in ["auto", None, DEFAULT.VALUE]: + raise ValueError( + f'`num_replicas` must be an int or "auto", but got: {num_replicas}' + ) + + return values + + @root_validator + def validate_max_replicas_per_node_and_placement_group_bundles(cls, values): + max_replicas_per_node = values.get("max_replicas_per_node", None) + placement_group_bundles = values.get("placement_group_bundles", None) + + if max_replicas_per_node not in [ + DEFAULT.VALUE, + None, + ] and placement_group_bundles not in [DEFAULT.VALUE, None]: + raise ValueError( + "Setting max_replicas_per_node is not allowed when " + "placement_group_bundles is provided." + ) + + return values + + @root_validator + def validate_max_queued_requests(cls, values): + max_queued_requests = values.get("max_queued_requests", None) + if max_queued_requests is None or max_queued_requests == DEFAULT.VALUE: + return values + + if max_queued_requests < 1 and max_queued_requests != -1: + raise ValueError( + "max_queued_requests must be -1 (no limit) or a positive integer." + ) + + return values + + def _get_user_configured_option_names(self) -> Set[str]: + """Get set of names for all user-configured options. + + Any field not set to DEFAULT.VALUE is considered a user-configured option. 
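A sketch of how `_get_user_configured_option_names` behaves, assuming `DeploymentSchema` is importable as above; only explicitly set fields differ from the `DEFAULT.VALUE` sentinel:

.. code-block:: python

    from ray.serve.schema import DeploymentSchema

    d = DeploymentSchema(name="worker", num_replicas=3)

    configured = d._get_user_configured_option_names()
    assert "name" in configured and "num_replicas" in configured
    assert "max_ongoing_requests" not in configured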
+ """ + + return { + field for field, value in self.dict().items() if value is not DEFAULT.VALUE + } + + +def _deployment_info_to_schema(name: str, info: DeploymentInfo) -> DeploymentSchema: + """Converts a DeploymentInfo object to DeploymentSchema.""" + + schema = DeploymentSchema( + name=name, + max_ongoing_requests=info.deployment_config.max_ongoing_requests, + max_queued_requests=info.deployment_config.max_queued_requests, + user_config=info.deployment_config.user_config, + graceful_shutdown_wait_loop_s=( + info.deployment_config.graceful_shutdown_wait_loop_s + ), + graceful_shutdown_timeout_s=info.deployment_config.graceful_shutdown_timeout_s, + health_check_period_s=info.deployment_config.health_check_period_s, + health_check_timeout_s=info.deployment_config.health_check_timeout_s, + ray_actor_options=info.replica_config.ray_actor_options, + ) + + if info.deployment_config.autoscaling_config is not None: + schema.autoscaling_config = info.deployment_config.autoscaling_config.dict() + else: + schema.num_replicas = info.deployment_config.num_replicas + + return schema + + +@PublicAPI(stability="stable") +class ServeApplicationSchema(BaseModel): + """ + Describes one Serve application, and currently can also be used as a standalone + config to deploy a single application to a Ray cluster. + """ + + name: str = Field( + default=SERVE_DEFAULT_APP_NAME, + description=( + "Application name, the name should be unique within the serve instance" + ), + ) + route_prefix: Optional[str] = Field( + default="/", + description=( + "Route prefix for HTTP requests. If not provided, it will use" + "route_prefix of the ingress deployment. By default, the ingress route " + "prefix is '/'." + ), + ) + import_path: str = Field( + ..., + description=( + "An import path to a bound deployment node. Should be of the " + 'form "module.submodule_1...submodule_n.' + 'dag_node". This is equivalent to ' + '"from module.submodule_1...submodule_n import ' + 'dag_node". Only works with Python ' + "applications. This field is REQUIRED when deploying Serve config " + "to a Ray cluster." + ), + ) + runtime_env: dict = Field( + default={}, + description=( + "The runtime_env that the deployment graph will be run in. " + "Per-deployment runtime_envs will inherit from this. working_dir " + "and py_modules may contain only remote URIs." + ), + ) + host: str = Field( + default="0.0.0.0", + description=( + "Host for HTTP servers to listen on. Defaults to " + '"0.0.0.0", which exposes Serve publicly. Cannot be updated once ' + "your Serve application has started running. The Serve application " + "must be shut down and restarted with the new host instead." + ), + ) + port: int = Field( + default=8000, + description=( + "Port for HTTP server. Defaults to 8000. Cannot be updated once " + "your Serve application has started running. The Serve application " + "must be shut down and restarted with the new port instead." 
+ ), + ) + deployments: List[DeploymentSchema] = Field( + default=[], + description="Deployment options that override options specified in the code.", + ) + args: Dict = Field( + default={}, + description="Arguments that will be passed to the application builder.", + ) + logging_config: LoggingConfig = Field( + default=None, + description="Logging config for configuring serve application logs.", + ) + + @property + def deployment_names(self) -> List[str]: + return [d.name for d in self.deployments] + + @validator("runtime_env") + def runtime_env_contains_remote_uris(cls, v): + # Ensure that all uris in py_modules and working_dir are remote. + if v is None: + return + + uris = v.get("py_modules", []) + if "working_dir" in v and v["working_dir"] not in uris: + uris.append(v["working_dir"]) + + for uri in uris: + if uri is not None: + try: + parse_uri(uri) + except ValueError as e: + raise ValueError( + "runtime_envs in the Serve config support only " + "remote URIs in working_dir and py_modules. Got " + f"error when parsing URI: {e}" + ) + + return v + + @validator("import_path") + def import_path_format_valid(cls, v: str): + if v is None: + return + + if ":" in v: + if v.count(":") > 1: + raise ValueError( + f'Got invalid import path "{v}". An ' + "import path may have at most one colon." + ) + if v.rfind(":") == 0 or v.rfind(":") == len(v) - 1: + raise ValueError( + f'Got invalid import path "{v}". An ' + "import path may not start or end with a colon." + ) + return v + else: + if v.count(".") < 1: + raise ValueError( + f'Got invalid import path "{v}". An ' + "import path must contain at least on dot or colon " + "separating the module (and potentially submodules) from " + 'the deployment graph. E.g.: "module.deployment_graph".' + ) + if v.rfind(".") == 0 or v.rfind(".") == len(v) - 1: + raise ValueError( + f'Got invalid import path "{v}". An ' + "import path may not start or end with a dot." + ) + + return v + + @staticmethod + def get_empty_schema_dict() -> Dict: + """Returns an empty app schema dictionary. + + Schema can be used as a representation of an empty Serve application config. + """ + + return { + "import_path": "", + "runtime_env": {}, + "deployments": [], + } + + +@PublicAPI(stability="alpha") +class gRPCOptionsSchema(BaseModel): + """Options to start the gRPC Proxy with.""" + + port: int = Field( + default=DEFAULT_GRPC_PORT, + description=( + "Port for gRPC server. Defaults to 9000. Cannot be updated once " + "Serve has started running. Serve must be shut down and restarted " + "with the new port instead." + ), + ) + grpc_servicer_functions: List[str] = Field( + default=[], + description=( + "List of import paths for gRPC `add_servicer_to_server` functions to add " + "to Serve's gRPC proxy. Default to empty list, which means no gRPC methods " + "will be added and no gRPC server will be started. The servicer functions " + "need to be importable from the context of where Serve is running." + ), + ) + + +@PublicAPI(stability="stable") +class HTTPOptionsSchema(BaseModel): + """Options to start the HTTP Proxy with. + + NOTE: This config allows extra parameters to make it forward-compatible (ie + older versions of Serve are able to accept configs from a newer versions, + simply ignoring new parameters). + """ + + host: str = Field( + default="0.0.0.0", + description=( + "Host for HTTP servers to listen on. Defaults to " + '"0.0.0.0", which exposes Serve publicly. Cannot be updated once ' + "Serve has started running. 
Serve must be shut down and restarted " + "with the new host instead." + ), + ) + port: int = Field( + default=8000, + description=( + "Port for HTTP server. Defaults to 8000. Cannot be updated once " + "Serve has started running. Serve must be shut down and restarted " + "with the new port instead." + ), + ) + root_path: str = Field( + default="", + description=( + 'Root path to mount the serve application (for example, "/serve"). All ' + 'deployment routes will be prefixed with this path. Defaults to "".' + ), + ) + request_timeout_s: float = Field( + default=None, + description="The timeout for HTTP requests. Defaults to no timeout.", + ) + keep_alive_timeout_s: int = Field( + default=DEFAULT_UVICORN_KEEP_ALIVE_TIMEOUT_S, + description="The HTTP proxy will keep idle connections alive for this duration " + "before closing them when no requests are ongoing. Defaults to " + f"{DEFAULT_UVICORN_KEEP_ALIVE_TIMEOUT_S} seconds.", + ) + + +@PublicAPI(stability="stable") +class ServeDeploySchema(BaseModel): + """ + Multi-application config for deploying a list of Serve applications to the Ray + cluster. + + This is the request JSON schema for the v2 REST API + `PUT "/api/serve/applications/"`. + + NOTE: This config allows extra parameters to make it forward-compatible (ie + older versions of Serve are able to accept configs from a newer versions, + simply ignoring new parameters) + """ + + proxy_location: ProxyLocation = Field( + default=ProxyLocation.EveryNode, + description=( + "Config for where to run proxies for ingress traffic to the cluster." + ), + ) + http_options: HTTPOptionsSchema = Field( + default=HTTPOptionsSchema(), description="Options to start the HTTP Proxy with." + ) + grpc_options: gRPCOptionsSchema = Field( + default=gRPCOptionsSchema(), description="Options to start the gRPC Proxy with." + ) + logging_config: LoggingConfig = Field( + default=None, + description="Logging config for configuring serve components logs.", + ) + applications: List[ServeApplicationSchema] = Field( + ..., description="The set of applications to run on the Ray cluster." + ) + target_capacity: Optional[float] = TARGET_CAPACITY_FIELD + + @validator("applications") + def application_names_unique(cls, v): + # Ensure there are no duplicate applications listed + names = [app.name for app in v] + duplicates = {f'"{name}"' for name in names if names.count(name) > 1} + if len(duplicates): + apps_str = ("application " if len(duplicates) == 1 else "applications ") + ( + ", ".join(duplicates) + ) + raise ValueError( + f"Found multiple configs for {apps_str}. Please remove all duplicates." + ) + return v + + @validator("applications") + def application_routes_unique(cls, v): + # Ensure each application with a non-null route prefix has unique route prefixes + routes = [app.route_prefix for app in v if app.route_prefix is not None] + duplicates = {f'"{route}"' for route in routes if routes.count(route) > 1} + if len(duplicates): + routes_str = ( + "route prefix " if len(duplicates) == 1 else "route prefixes " + ) + (", ".join(duplicates)) + raise ValueError( + f"Found duplicate applications for {routes_str}. Please ensure each " + "application's route_prefix is unique." 
+ ) + return v + + @validator("applications") + def application_names_nonempty(cls, v): + for app in v: + if len(app.name) == 0: + raise ValueError("Application names must be nonempty.") + return v + + @root_validator + def nested_host_and_port(cls, values): + # TODO (zcin): ServeApplicationSchema still needs to have host and port + # fields to support single-app mode, but in multi-app mode the host and port + # fields at the top-level deploy config is used instead. Eventually, after + # migration, we should remove these fields from ServeApplicationSchema. + for app_config in values.get("applications"): + if "host" in app_config.dict(exclude_unset=True): + raise ValueError( + f'Host "{app_config.host}" is set in the config for application ' + f"`{app_config.name}`. Please remove it and set host in the top " + "level deploy config only." + ) + if "port" in app_config.dict(exclude_unset=True): + raise ValueError( + f"Port {app_config.port} is set in the config for application " + f"`{app_config.name}`. Please remove it and set port in the top " + "level deploy config only." + ) + return values + + @staticmethod + def get_empty_schema_dict() -> Dict: + """Returns an empty deploy schema dictionary. + + Schema can be used as a representation of an empty Serve deploy config. + """ + + return {"applications": []} + + +# Keep in sync with ServeSystemActorStatus in +# python/ray/dashboard/client/src/type/serve.ts +@PublicAPI(stability="stable") +class ProxyStatus(str, Enum): + """The current status of the proxy.""" + + STARTING = "STARTING" + HEALTHY = "HEALTHY" + UNHEALTHY = "UNHEALTHY" + DRAINING = "DRAINING" + # The DRAINED status is a momentary state + # just before the proxy is removed + # so this status won't show up on the dashboard. + DRAINED = "DRAINED" + + +@PublicAPI(stability="alpha") +@dataclass +class DeploymentStatusOverview: + """Describes the status of a deployment. + + Attributes: + status: The current status of the deployment. + replica_states: A map indicating how many replicas there are of + each replica state. + message: A message describing the deployment status in more + detail. + """ + + status: DeploymentStatus + status_trigger: DeploymentStatusTrigger + replica_states: Dict[ReplicaState, int] + message: str + + +@PublicAPI(stability="stable") +class ApplicationStatus(str, Enum): + """The current status of the application.""" + + NOT_STARTED = "NOT_STARTED" + DEPLOYING = "DEPLOYING" + DEPLOY_FAILED = "DEPLOY_FAILED" + RUNNING = "RUNNING" + UNHEALTHY = "UNHEALTHY" + DELETING = "DELETING" + + +@PublicAPI(stability="alpha") +@dataclass +class ApplicationStatusOverview: + """Describes the status of an application and all its deployments. + + Attributes: + status: The current status of the application. + message: A message describing the application status in more + detail. + last_deployed_time_s: The time at which the application was + deployed. A Unix timestamp in seconds. + deployments: The deployments in this application. + """ + + status: ApplicationStatus + message: str + last_deployed_time_s: float + deployments: Dict[str, DeploymentStatusOverview] + + +@PublicAPI(stability="alpha") +@dataclass(eq=True) +class ServeStatus: + """Describes the status of Serve. + + Attributes: + proxies: The proxy actors running on each node in the cluster. + A map from node ID to proxy status. + applications: The live applications in the cluster. + target_capacity: the target capacity percentage for all replicas across the + cluster. 
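`serve.status()` returns this `ServeStatus` dataclass; a minimal sketch, assuming a running Serve instance on a local cluster:

.. code-block:: python

    from ray import serve

    status = serve.status()
    for name, app in status.applications.items():
        print(name, app.status, app.message)
    for node_id, proxy_status in status.proxies.items():
        print(node_id, proxy_status)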
+ """ + + proxies: Dict[str, ProxyStatus] = field(default_factory=dict) + applications: Dict[str, ApplicationStatusOverview] = field(default_factory=dict) + target_capacity: Optional[float] = TARGET_CAPACITY_FIELD + + +@PublicAPI(stability="stable") +class ServeActorDetails(BaseModel, frozen=True): + """Detailed info about a Ray Serve actor. + + Attributes: + node_id: ID of the node that the actor is running on. + node_ip: IP address of the node that the actor is running on. + actor_id: Actor ID. + actor_name: Actor name. + worker_id: Worker ID. + log_file_path: The relative path to the Serve actor's log file from the ray logs + directory. + """ + + node_id: Optional[str] = Field( + description="ID of the node that the actor is running on." + ) + node_ip: Optional[str] = Field( + description="IP address of the node that the actor is running on." + ) + actor_id: Optional[str] = Field(description="Actor ID.") + actor_name: Optional[str] = Field(description="Actor name.") + worker_id: Optional[str] = Field(description="Worker ID.") + log_file_path: Optional[str] = Field( + description=( + "The relative path to the Serve actor's log file from the ray logs " + "directory." + ) + ) + + +@PublicAPI(stability="stable") +class ReplicaDetails(ServeActorDetails, frozen=True): + """Detailed info about a single deployment replica.""" + + replica_id: str = Field(description="Unique ID for the replica.") + state: ReplicaState = Field(description="Current state of the replica.") + pid: Optional[int] = Field(description="PID of the replica actor process.") + start_time_s: float = Field( + description=( + "The time at which the replica actor was started. If the controller dies, " + "this is the time at which the controller recovers and retrieves replica " + "state from the running replica actor." + ) + ) + + +@PublicAPI(stability="stable") +class DeploymentDetails(BaseModel, extra=Extra.forbid, frozen=True): + """ + Detailed info about a deployment within a Serve application. + """ + + name: str = Field(description="Deployment name.") + status: DeploymentStatus = Field( + description="The current status of the deployment." + ) + status_trigger: DeploymentStatusTrigger = Field( + description="[EXPERIMENTAL] The trigger for the current status.", + ) + message: str = Field( + description=( + "If there are issues with the deployment, this will describe the issue in " + "more detail." + ) + ) + deployment_config: DeploymentSchema = Field( + description=( + "The set of deployment config options that are currently applied to this " + "deployment. These options may come from the user's code, config file " + "options, or Serve default values." + ) + ) + target_num_replicas: NonNegativeInt = Field( + description=( + "The current target number of replicas for this deployment. This can " + "change over time for autoscaling deployments, but will remain a constant " + "number for other deployments." + ) + ) + replicas: List[ReplicaDetails] = Field( + description="Details about the live replicas of this deployment." 
+    )
+
+
+@PublicAPI(stability="alpha")
+class APIType(str, Enum):
+    """Tracks the type of API that an application originates from."""
+
+    UNKNOWN = "unknown"
+    IMPERATIVE = "imperative"
+    DECLARATIVE = "declarative"
+
+
+@PublicAPI(stability="stable")
+class ApplicationDetails(BaseModel, extra=Extra.forbid, frozen=True):
+    """Detailed info about a Serve application."""
+
+    name: str = Field(description="Application name.")
+    route_prefix: Optional[str] = Field(
+        ...,
+        description=(
+            "This is the `route_prefix` of the ingress deployment in the application. "
+            "Requests to paths under this HTTP path prefix will be routed to this "
+            "application. This value may be null if the application is deploying "
+            "and app information has not yet fully propagated in the backend; or "
+            "if the user explicitly set the prefix to `None`, so the application isn't "
+            "exposed over HTTP. Routing is done based on longest-prefix match, so if "
+            'you have deployment A with a prefix of "/a" and deployment B with a '
+            'prefix of "/a/b", requests to "/a", "/a/", and "/a/c" go to A and '
+            'requests to "/a/b", "/a/b/", and "/a/b/c" go to B. Routes must not end '
+            'with a "/" unless they\'re the root (just "/"), which acts as a catch-all.'
+        ),
+    )
+    docs_path: Optional[str] = Field(
+        ...,
+        description=(
+            "The path at which the docs for this application are served, for instance "
+            "the `docs_url` for FastAPI-integrated applications."
+        ),
+    )
+    status: ApplicationStatus = Field(
+        description="The current status of the application."
+    )
+    message: str = Field(
+        description="A message that gives more insight into the application status."
+    )
+    last_deployed_time_s: float = Field(
+        description="The time at which the application was deployed."
+    )
+    deployed_app_config: Optional[ServeApplicationSchema] = Field(
+        description=(
+            "The exact copy of the application config that was submitted to the "
+            "cluster. This will include all of, and only, the options that were "
+            "explicitly specified in the submitted config. Default values for "
+            "unspecified options will not be displayed, and deployments that are part "
+            "of the application but unlisted in the config will also not be displayed. "
+            "Note that default values for unspecified options are applied to the "
+            "cluster under the hood, and deployments that were unlisted will still be "
+            "deployed. This config simply avoids cluttering with unspecified fields "
+            "for readability."
+        )
+    )
+    source: APIType = Field(
+        description=(
+            "The type of API that the application originates from. "
+            "This is a Developer API that is subject to change."
+        ),
+    )
+    deployments: Dict[str, DeploymentDetails] = Field(
+        description="Details about the deployments in this application."
+    )
+
+    application_details_route_prefix_format = validator(
+        "route_prefix", allow_reuse=True
+    )(_route_prefix_format)
+
+
+@PublicAPI(stability="stable")
+class ProxyDetails(ServeActorDetails, frozen=True):
+    """Detailed info about a Ray Serve ProxyActor.
+
+    Attributes:
+        status: The current status of the proxy.
+    """
+
+    status: ProxyStatus = Field(description="Current status of the proxy.")
+
+
+@PublicAPI(stability="stable")
+class ServeInstanceDetails(BaseModel, extra=Extra.forbid):
+    """
+    Serve metadata with system-level info and details on all applications deployed to
+    the Ray cluster.
+
+    This is the response JSON schema for v2 REST API `GET /api/serve/applications`.
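A toy matcher (not Serve's actual router) illustrating the longest-prefix semantics described for `route_prefix` above:

    def match_route(path: str, prefixes: list):
        # A prefix matches if it is the catch-all "/", equals the path, or is a
        # proper path-segment prefix; the longest match wins.
        hits = [p for p in prefixes if p == "/" or path == p or path.startswith(p + "/")]
        return max(hits, key=len, default=None)

    assert match_route("/a/c", ["/a", "/a/b"]) == "/a"
    assert match_route("/a/b/c", ["/a", "/a/b"]) == "/a/b"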
+ """ + + controller_info: ServeActorDetails = Field( + description="Details about the Serve controller actor." + ) + proxy_location: Optional[ProxyLocation] = Field( + description=( + "Config for where to run proxies for ingress traffic to the cluster.\n" + '- "Disabled": disable the proxies entirely.\n' + '- "HeadOnly": run only one proxy on the head node.\n' + '- "EveryNode": run proxies on every node that has at least one replica.\n' + ), + ) + http_options: Optional[HTTPOptionsSchema] = Field(description="HTTP Proxy options.") + grpc_options: Optional[gRPCOptionsSchema] = Field(description="gRPC Proxy options.") + proxies: Dict[str, ProxyDetails] = Field( + description=( + "Mapping from node_id to details about the Proxy running on that node." + ) + ) + deploy_mode: ServeDeployMode = Field( + default=ServeDeployMode.MULTI_APP, + description=( + "[DEPRECATED]: single-app configs are removed, so this is always " + "MULTI_APP. This field will be removed in a future release." + ), + ) + applications: Dict[str, ApplicationDetails] = Field( + description="Details about all live applications running on the cluster." + ) + target_capacity: Optional[float] = TARGET_CAPACITY_FIELD + + @staticmethod + def get_empty_schema_dict() -> Dict: + """Empty Serve instance details dictionary. + + Represents no Serve instance running on the cluster. + """ + + return { + "deploy_mode": "MULTI_APP", + "controller_info": {}, + "proxies": {}, + "applications": {}, + "target_capacity": None, + } + + def _get_status(self) -> ServeStatus: + return ServeStatus( + target_capacity=self.target_capacity, + proxies={node_id: proxy.status for node_id, proxy in self.proxies.items()}, + applications={ + app_name: ApplicationStatusOverview( + status=app.status, + message=app.message, + last_deployed_time_s=app.last_deployed_time_s, + deployments={ + deployment_name: DeploymentStatusOverview( + status=deployment.status, + status_trigger=deployment.status_trigger, + replica_states=dict( + Counter([r.state.value for r in deployment.replicas]) + ), + message=deployment.message, + ) + for deployment_name, deployment in app.deployments.items() + }, + ) + for app_name, app in self.applications.items() + }, + ) + + def _get_user_facing_json_serializable_dict( + self, *args, **kwargs + ) -> Dict[str, Any]: + """Generates json serializable dictionary with user facing data.""" + values = super().dict(*args, **kwargs) + + # `serialized_policy_def` is only used internally and should not be exposed to + # the REST api. This method iteratively removes it from each autoscaling config + # if exists. 
+ for app_name, application in values["applications"].items(): + for deployment_name, deployment in application["deployments"].items(): + if ( + "deployment_config" in deployment + and "autoscaling_config" in deployment["deployment_config"] + ): + deployment["deployment_config"]["autoscaling_config"].pop( + "_serialized_policy_def", None + ) + + return values diff --git a/.venv/lib/python3.11/site-packages/ray/serve/scripts.py b/.venv/lib/python3.11/site-packages/ray/serve/scripts.py new file mode 100644 index 0000000000000000000000000000000000000000..64c2f362ee2ca58aee2cde9fc2fea36a087084ab --- /dev/null +++ b/.venv/lib/python3.11/site-packages/ray/serve/scripts.py @@ -0,0 +1,886 @@ +#!/usr/bin/env python +import os +import pathlib +import re +import sys +import time +import traceback +from dataclasses import asdict +from typing import Any, Dict, List, Optional, Tuple + +import click +import watchfiles +import yaml + +import ray +from ray import serve +from ray._private.utils import import_attr +from ray.autoscaler._private.cli_logger import cli_logger +from ray.dashboard.modules.dashboard_sdk import parse_runtime_env_args +from ray.dashboard.modules.serve.sdk import ServeSubmissionClient +from ray.serve._private import api as _private_api +from ray.serve._private.build_app import BuiltApplication, build_app +from ray.serve._private.constants import ( + DEFAULT_GRPC_PORT, + DEFAULT_HTTP_HOST, + DEFAULT_HTTP_PORT, + SERVE_DEFAULT_APP_NAME, + SERVE_NAMESPACE, +) +from ray.serve.config import DeploymentMode, ProxyLocation, gRPCOptions +from ray.serve.deployment import Application, deployment_to_schema +from ray.serve.schema import ( + LoggingConfig, + ServeApplicationSchema, + ServeDeploySchema, + ServeInstanceDetails, +) + +APP_DIR_HELP_STR = ( + "Local directory to look for the IMPORT_PATH (will be inserted into " + "PYTHONPATH). Defaults to '.', meaning that an object in ./main.py " + "can be imported as 'main.object'. Not relevant if you're importing " + "from an installed module." +) +RAY_INIT_ADDRESS_HELP_STR = ( + "Address to use for ray.init(). Can also be set using " + "the RAY_ADDRESS environment variable." +) +RAY_DASHBOARD_ADDRESS_HELP_STR = ( + "Address for the Ray dashboard. Defaults to http://localhost:8265. " + "Can also be set using the RAY_DASHBOARD_ADDRESS environment variable." +) + + +# See https://stackoverflow.com/a/33300001/11162437 +def str_presenter(dumper: yaml.Dumper, data): + """ + A custom representer to write multi-line strings in block notation using a literal + style. + + Ensures strings with newline characters print correctly. + """ + + if len(data.splitlines()) > 1: + return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|") + return dumper.represent_scalar("tag:yaml.org,2002:str", data) + + +# See https://stackoverflow.com/a/14693789/11162437 +def remove_ansi_escape_sequences(input: str): + """Removes ANSI escape sequences in a string""" + ansi_escape = re.compile( + r""" + \x1B # ESC + (?: # 7-bit C1 Fe (except CSI) + [@-Z\\-_] + | # or [ for CSI, followed by a control sequence + \[ + [0-?]* # Parameter bytes + [ -/]* # Intermediate bytes + [@-~] # Final byte + ) + """, + re.VERBOSE, + ) + + return ansi_escape.sub("", input) + + +def process_dict_for_yaml_dump(data): + """ + Removes ANSI escape sequences recursively for all strings in dict. + + We often need to use yaml.dump() to print dictionaries that contain exception + tracebacks, which can contain ANSI escape sequences that color printed text. 
However
+    yaml.dump() will format the tracebacks incorrectly if ANSI escape sequences are
+    present, so we need to remove them before dumping.
+    """
+
+    for k, v in data.items():
+        if isinstance(v, dict):
+            data[k] = process_dict_for_yaml_dump(v)
+        elif isinstance(v, list):
+            # Clean nested dicts and strings; leave other items untouched.
+            data[k] = [
+                process_dict_for_yaml_dump(item) if isinstance(item, dict)
+                else remove_ansi_escape_sequences(item) if isinstance(item, str)
+                else item
+                for item in v
+            ]
+        elif isinstance(v, str):
+            data[k] = remove_ansi_escape_sequences(v)
+
+    return data
+
+
+def convert_args_to_dict(args: Tuple[str]) -> Dict[str, str]:
+    args_dict = dict()
+    for arg in args:
+        split = arg.split("=", maxsplit=1)
+        if len(split) != 2 or len(split[1]) == 0:
+            raise click.ClickException(
+                f"Invalid application argument '{arg}', "
+                "must be of the form '<key>=<val>'."
+            )
+
+        args_dict[split[0]] = split[1]
+
+    return args_dict
+
+
+def warn_if_agent_address_set():
+    if "RAY_AGENT_ADDRESS" in os.environ:
+        cli_logger.warning(
+            "The `RAY_AGENT_ADDRESS` env var has been deprecated in favor of "
+            "the `RAY_DASHBOARD_ADDRESS` env var. The `RAY_AGENT_ADDRESS` is "
+            "ignored."
+        )
+
+
+@click.group(
+    help="CLI for managing Serve applications on a Ray cluster.",
+    context_settings=dict(help_option_names=["--help", "-h"]),
+)
+def cli():
+    pass
+
+
+@cli.command(help="Start Serve on the Ray cluster.")
+@click.option(
+    "--address",
+    "-a",
+    default=os.environ.get("RAY_ADDRESS", "auto"),
+    required=False,
+    type=str,
+    help=RAY_INIT_ADDRESS_HELP_STR,
+)
+@click.option(
+    "--http-host",
+    default=DEFAULT_HTTP_HOST,
+    required=False,
+    type=str,
+    help="Host for HTTP proxies to listen on. " f"Defaults to {DEFAULT_HTTP_HOST}.",
+)
+@click.option(
+    "--http-port",
+    default=DEFAULT_HTTP_PORT,
+    required=False,
+    type=int,
+    help="Port for HTTP proxies to listen on. " f"Defaults to {DEFAULT_HTTP_PORT}.",
+)
+@click.option(
+    "--http-location",
+    default=DeploymentMode.HeadOnly,
+    required=False,
+    type=click.Choice(list(DeploymentMode)),
+    help="DEPRECATED: Use `--proxy-location` instead.",
+)
+@click.option(
+    "--proxy-location",
+    default=ProxyLocation.EveryNode,
+    required=False,
+    type=click.Choice(list(ProxyLocation)),
+    help="Location of the proxies. Defaults to EveryNode.",
+)
+@click.option(
+    "--grpc-port",
+    default=DEFAULT_GRPC_PORT,
+    required=False,
+    type=int,
+    help="Port for gRPC proxies to listen on. " f"Defaults to {DEFAULT_GRPC_PORT}.",
+)
+@click.option(
+    "--grpc-servicer-functions",
+    default=[],
+    required=False,
+    multiple=True,
+    help="Servicer function for adding the method handler to the gRPC server. "
+    "Defaults to an empty list and no gRPC server is started.",
+)
+def start(
+    address,
+    http_host,
+    http_port,
+    http_location,
+    proxy_location,
+    grpc_port,
+    grpc_servicer_functions,
+):
+    if http_location != DeploymentMode.HeadOnly:
+        cli_logger.warning(
+            "The `--http-location` flag to `serve start` is deprecated, "
+            "use `--proxy-location` instead."
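A quick sketch of the two helpers above in action (importable from `ray.serve.scripts` when Ray is installed; the values are made up):

    from ray.serve.scripts import convert_args_to_dict, process_dict_for_yaml_dump

    assert convert_args_to_dict(("model_path=/tmp/model.pkl",)) == {
        "model_path": "/tmp/model.pkl"
    }

    # ANSI color codes are stripped recursively so tracebacks dump cleanly.
    status = {"error": "\x1b[31mboom\x1b[0m", "apps": [{"msg": "\x1b[1mok\x1b[0m"}]}
    assert process_dict_for_yaml_dump(status) == {"error": "boom", "apps": [{"msg": "ok"}]}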
+ ) + + proxy_location = http_location + + ray.init( + address=address, + namespace=SERVE_NAMESPACE, + ) + serve.start( + proxy_location=proxy_location, + http_options=dict( + host=http_host, + port=http_port, + ), + grpc_options=gRPCOptions( + port=grpc_port, + grpc_servicer_functions=grpc_servicer_functions, + ), + ) + + +def _generate_config_from_file_or_import_path( + config_or_import_path: str, + *, + name: Optional[str], + arguments: Dict[str, str], + runtime_env: Optional[Dict[str, Any]], +) -> ServeDeploySchema: + """Generates a deployable config schema for the passed application(s).""" + if pathlib.Path(config_or_import_path).is_file(): + config_path = config_or_import_path + cli_logger.print(f"Deploying from config file: '{config_path}'.") + if len(arguments) > 0: + raise click.ClickException( + "Application arguments cannot be specified for a config file." + ) + + # TODO(edoakes): runtime_env is silently ignored -- should we enable overriding? + with open(config_path, "r") as config_file: + config_dict = yaml.safe_load(config_file) + config = ServeDeploySchema.parse_obj(config_dict) + else: + # TODO(edoakes): should we default to --working-dir="." for this? + import_path = config_or_import_path + cli_logger.print(f"Deploying from import path: '{import_path}'.") + + app = ServeApplicationSchema( + import_path=import_path, + runtime_env=runtime_env, + args=arguments, + ) + if name is not None: + app.name = name + config = ServeDeploySchema(applications=[app]) + + return config + + +@cli.command( + short_help="Deploy an application or group of applications.", + help=( + "Deploy an application from an import path (e.g., main:app) " + "or a group of applications from a YAML config file.\n\n" + "Passed import paths must point to an Application object or " + "a function that returns one. If a function is used, arguments can be " + "passed to it in 'key=val' format after the import path, for example:\n\n" + "serve deploy main:app model_path='/path/to/model.pkl' num_replicas=5\n\n" + "This command makes a REST API request to a running Ray cluster." + ), +) +@click.argument("config_or_import_path") +@click.argument("arguments", nargs=-1, required=False) +@click.option( + "--runtime-env", + type=str, + default=None, + required=False, + help="Path to a local YAML file containing a runtime_env definition.", +) +@click.option( + "--runtime-env-json", + type=str, + default=None, + required=False, + help="JSON-serialized runtime_env dictionary.", +) +@click.option( + "--working-dir", + type=str, + default=None, + required=False, + help=( + "Directory containing files that your application(s) will run in. This must " + "be a remote URI to a .zip file (e.g., S3 bucket). This overrides the " + "working_dir in --runtime-env if both are specified." + ), +) +@click.option( + "--name", + required=False, + default=None, + type=str, + help="Custom name for the application. 
Ignored when deploying from a config file.", +) +@click.option( + "--address", + "-a", + default=os.environ.get("RAY_DASHBOARD_ADDRESS", "http://localhost:8265"), + required=False, + type=str, + help=RAY_DASHBOARD_ADDRESS_HELP_STR, +) +def deploy( + config_or_import_path: str, + arguments: Tuple[str], + runtime_env: str, + runtime_env_json: str, + working_dir: str, + name: Optional[str], + address: str, +): + args_dict = convert_args_to_dict(arguments) + final_runtime_env = parse_runtime_env_args( + runtime_env=runtime_env, + runtime_env_json=runtime_env_json, + working_dir=working_dir, + ) + + config = _generate_config_from_file_or_import_path( + config_or_import_path, + name=name, + arguments=args_dict, + runtime_env=final_runtime_env, + ) + + ServeSubmissionClient(address).deploy_applications( + config.dict(exclude_unset=True), + ) + cli_logger.success( + "\nSent deploy request successfully.\n " + "* Use `serve status` to check applications' statuses.\n " + "* Use `serve config` to see the current application config(s).\n" + ) + + +@cli.command( + short_help="Run an application or group of applications.", + help=( + "Run an application from an import path (e.g., my_script:" + "app) or a group of applications from a YAML config file.\n\n" + "Passed import paths must point to an Application object or " + "a function that returns one. If a function is used, arguments can be " + "passed to it in 'key=val' format after the import path, for example:\n\n" + "serve run my_script:app model_path='/path/to/model.pkl' num_replicas=5\n\n" + "If passing a YAML config, existing applications with no code changes will not " + "be updated.\n\n" + "By default, this will block and stream logs to the console. If you " + "Ctrl-C the command, it will shut down Serve on the cluster." + ), +) +@click.argument("config_or_import_path") +@click.argument("arguments", nargs=-1, required=False) +@click.option( + "--runtime-env", + type=str, + default=None, + required=False, + help="Path to a local YAML file containing a runtime_env definition. " + "This will be passed to ray.init() as the default for deployments.", +) +@click.option( + "--runtime-env-json", + type=str, + default=None, + required=False, + help="JSON-serialized runtime_env dictionary. This will be passed to " + "ray.init() as the default for deployments.", +) +@click.option( + "--working-dir", + type=str, + default=None, + required=False, + help=( + "Directory containing files that your application(s) will run in. Can be a " + "local directory or a remote URI to a .zip file (S3, GS, HTTP). " + "This overrides the working_dir in --runtime-env if both are " + "specified. This will be passed to ray.init() as the default for " + "deployments." + ), +) +@click.option( + "--app-dir", + "-d", + default=".", + type=str, + help=APP_DIR_HELP_STR, +) +@click.option( + "--address", + "-a", + default=os.environ.get("RAY_ADDRESS", None), + required=False, + type=str, + help=RAY_INIT_ADDRESS_HELP_STR, +) +@click.option( + "--blocking/--non-blocking", + default=True, + help=( + "Whether or not this command should be blocking. If blocking, it " + "will loop and log status until Ctrl-C'd, then clean up the app." + ), +) +@click.option( + "--reload", + "-r", + is_flag=True, + help=( + "This is an experimental feature - Listens for changes to files in the working directory, " + "--working-dir or the working_dir in the --runtime-env, and automatically redeploys " + "the application. This will block until Ctrl-C'd, then clean up the " + "app." 
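For reference, a sketch of what the config-generation helper above produces for an import path (the module name and arguments are hypothetical):

    from ray.serve.scripts import _generate_config_from_file_or_import_path

    cfg = _generate_config_from_file_or_import_path(
        "my_module:app",
        name="demo",
        arguments={"model_path": "/tmp/model.pkl"},
        runtime_env={},
    )
    print(cfg.applications[0].import_path)  # my_module:app
    print(cfg.applications[0].args)         # {'model_path': '/tmp/model.pkl'}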
+ ), +) +@click.option( + "--route-prefix", + required=False, + type=str, + default="/", + help=( + "Route prefix for the application. This should only be used " + "when running an application specified by import path and " + "will be ignored if running a config file." + ), +) +@click.option( + "--name", + required=False, + default=SERVE_DEFAULT_APP_NAME, + type=str, + help=( + "Name of the application. This should only be used " + "when running an application specified by import path and " + "will be ignored if running a config file." + ), +) +def run( + config_or_import_path: str, + arguments: Tuple[str], + runtime_env: str, + runtime_env_json: str, + working_dir: str, + app_dir: str, + address: str, + blocking: bool, + reload: bool, + route_prefix: str, + name: str, +): + sys.path.insert(0, app_dir) + args_dict = convert_args_to_dict(arguments) + final_runtime_env = parse_runtime_env_args( + runtime_env=runtime_env, + runtime_env_json=runtime_env_json, + working_dir=working_dir, + ) + + if pathlib.Path(config_or_import_path).is_file(): + if len(args_dict) > 0: + cli_logger.warning( + "Application arguments are ignored when running a config file." + ) + + is_config = True + config_path = config_or_import_path + cli_logger.print(f"Running config file: '{config_path}'.") + + with open(config_path, "r") as config_file: + config_dict = yaml.safe_load(config_file) + + config = ServeDeploySchema.parse_obj(config_dict) + + else: + is_config = False + import_path = config_or_import_path + cli_logger.print(f"Running import path: '{import_path}'.") + app = _private_api.call_user_app_builder_with_args_if_necessary( + import_attr(import_path), args_dict + ) + + # Only initialize ray if it has not happened yet. + if not ray.is_initialized(): + # Setting the runtime_env here will set defaults for the deployments. + ray.init( + address=address, namespace=SERVE_NAMESPACE, runtime_env=final_runtime_env + ) + elif ( + address is not None + and address != "auto" + and address != ray.get_runtime_context().gcs_address + ): + # Warning users the address they passed is different from the existing ray + # instance. + ray_address = ray.get_runtime_context().gcs_address + cli_logger.warning( + "An address was passed to `serve run` but the imported module also " + f"connected to Ray at a different address: '{ray_address}'. You do not " + "need to call `ray.init` in your code when using `serve run`." + ) + + http_options = {"location": "EveryNode"} + grpc_options = gRPCOptions() + # Merge http_options and grpc_options with the ones on ServeDeploySchema. + if is_config and isinstance(config, ServeDeploySchema): + config_http_options = config.http_options.dict() + http_options = {**config_http_options, **http_options} + grpc_options = gRPCOptions(**config.grpc_options.dict()) + + client = _private_api.serve_start( + http_options=http_options, + grpc_options=grpc_options, + ) + + try: + if is_config: + client.deploy_apps(config, _blocking=False) + cli_logger.success("Submitted deploy config successfully.") + if blocking: + while True: + # Block, letting Ray print logs to the terminal. + time.sleep(10) + else: + # This should not block if reload is true so the watchfiles can be triggered + should_block = blocking and not reload + serve.run(app, blocking=should_block, name=name, route_prefix=route_prefix) + + if reload: + if not blocking: + raise click.ClickException( + "The --non-blocking option conflicts with the --reload option." 
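The reload loop below is driven by watchfiles; a standalone sketch of the same polling pattern (the watched path is hypothetical):

    import watchfiles

    # With yield_on_timeout=True, the iterator yields an empty set every
    # rust_timeout milliseconds when nothing changed, so the loop stays alive
    # on a quiet directory instead of blocking forever.
    for changes in watchfiles.watch("/tmp/app", rust_timeout=10000, yield_on_timeout=True):
        if changes:
            print("Files changed:", changes)
            break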
+                    )
+                if working_dir:
+                    watch_dir = working_dir
+                else:
+                    watch_dir = app_dir
+
+                for changes in watchfiles.watch(
+                    watch_dir,
+                    rust_timeout=10000,
+                    yield_on_timeout=True,
+                ):
+                    if changes:
+                        try:
+                            # The module needs to be reloaded with `importlib` in order to
+                            # pick up any changes.
+                            app = _private_api.call_user_app_builder_with_args_if_necessary(
+                                import_attr(import_path, reload_module=True), args_dict
+                            )
+                            serve.run(
+                                target=app,
+                                blocking=False,
+                                name=name,
+                                route_prefix=route_prefix,
+                            )
+                        except Exception:
+                            traceback.print_exc()
+                            cli_logger.error(
+                                "Deploying the latest version of the application failed."
+                            )
+
+    except KeyboardInterrupt:
+        cli_logger.info("Got KeyboardInterrupt, shutting down...")
+        serve.shutdown()
+        sys.exit()
+
+    except Exception:
+        traceback.print_exc()
+        cli_logger.error(
+            "Received unexpected error, see console logs for more details. Shutting "
+            "down..."
+        )
+        serve.shutdown()
+        sys.exit()
+
+
+@cli.command(help="Get the current configs of Serve applications on the cluster.")
+@click.option(
+    "--address",
+    "-a",
+    default=os.environ.get("RAY_DASHBOARD_ADDRESS", "http://localhost:8265"),
+    required=False,
+    type=str,
+    help=RAY_DASHBOARD_ADDRESS_HELP_STR,
+)
+@click.option(
+    "--name",
+    "-n",
+    required=False,
+    type=str,
+    help=(
+        "Name of an application. Only applies to multi-application mode. If set, this "
+        "will only fetch the config for the specified application."
+    ),
+)
+def config(address: str, name: Optional[str]):
+    warn_if_agent_address_set()
+
+    serve_details = ServeInstanceDetails(
+        **ServeSubmissionClient(address).get_serve_details()
+    )
+
+    # Fetch app configs for all live applications on the cluster
+    if name is None:
+        print(
+            "\n---\n\n".join(
+                yaml.safe_dump(
+                    app.deployed_app_config.dict(exclude_unset=True),
+                    sort_keys=False,
+                )
+                for app in serve_details.applications.values()
+                if app.deployed_app_config is not None
+            ),
+            end="",
+        )
+    # Fetch a specific app config by name.
+    else:
+        app = serve_details.applications.get(name)
+        if app is None or app.deployed_app_config is None:
+            print(f'No config has been deployed for application "{name}".')
+        else:
+            config = app.deployed_app_config.dict(exclude_unset=True)
+            print(yaml.safe_dump(config, sort_keys=False), end="")
+
+
+@cli.command(
+    short_help="Get the current status of all Serve applications on the cluster.",
+    help=(
+        "Prints status information about all applications on the cluster.\n\n"
+        "An application may be:\n\n"
+        "- NOT_STARTED: the application does not exist.\n"
+        "- DEPLOYING: the deployments in the application are still deploying and "
+        "haven't reached the target number of replicas.\n"
+        "- RUNNING: all deployments are healthy.\n"
+        "- DEPLOY_FAILED: the application failed to deploy or reach a running state.\n"
+        "- DELETING: the application is being deleted, and the deployments in the "
+        "application are being torn down.\n\n"
+        "The deployments within each application may be:\n\n"
+        "- HEALTHY: all replicas are acting normally and passing their health checks.\n"
+        "- UNHEALTHY: at least one replica is not acting normally and may not be "
+        "passing its health check.\n"
+        "- UPDATING: the deployment is updating."
+    ),
+)
+@click.option(
+    "--address",
+    "-a",
+    default=os.environ.get("RAY_DASHBOARD_ADDRESS", "http://localhost:8265"),
+    required=False,
+    type=str,
+    help=RAY_DASHBOARD_ADDRESS_HELP_STR,
+)
+@click.option(
+    "--name",
+    "-n",
+    default=None,
+    required=False,
+    type=str,
+    help=(
+        "Name of an application. 
If set, this will display only the status of the " + "specified application." + ), +) +def status(address: str, name: Optional[str]): + warn_if_agent_address_set() + + serve_details = ServeInstanceDetails( + **ServeSubmissionClient(address).get_serve_details() + ) + status = asdict(serve_details._get_status()) + + # Ensure multi-line strings in app_status is dumped/printed correctly + yaml.SafeDumper.add_representer(str, str_presenter) + + if name is None: + print( + yaml.safe_dump( + # Ensure exception traceback in app_status are printed correctly + process_dict_for_yaml_dump(status), + default_flow_style=False, + sort_keys=False, + ), + end="", + ) + else: + if name not in serve_details.applications: + cli_logger.error(f'Application "{name}" does not exist.') + else: + print( + yaml.safe_dump( + # Ensure exception tracebacks in app_status are printed correctly + process_dict_for_yaml_dump(status["applications"][name]), + default_flow_style=False, + sort_keys=False, + ), + end="", + ) + + +@cli.command( + help="Shuts down Serve on the cluster, deleting all applications.", +) +@click.option( + "--address", + "-a", + default=os.environ.get("RAY_DASHBOARD_ADDRESS", "http://localhost:8265"), + required=False, + type=str, + help=RAY_DASHBOARD_ADDRESS_HELP_STR, +) +@click.option("--yes", "-y", is_flag=True, help="Bypass confirmation prompt.") +def shutdown(address: str, yes: bool): + warn_if_agent_address_set() + + if not yes: + click.confirm( + f"This will shut down Serve on the cluster at address " + f'"{address}" and delete all applications there. Do you ' + "want to continue?", + abort=True, + ) + + ServeSubmissionClient(address).delete_applications() + + cli_logger.success( + "Sent shutdown request; applications will be deleted asynchronously." + ) + + +@cli.command( + short_help="Generate a config file for the specified applications.", + help=( + "Imports the applications at IMPORT_PATHS and generates a structured, multi-" + "application config for them. If the flag --single-app is set, accepts one " + "application and generates a single-application config. Config " + "outputted from this command can be used by `serve deploy` or the REST API. " + ), +) +@click.argument("import_paths", nargs=-1, required=True) +@click.option( + "--app-dir", + "-d", + default=".", + type=str, + help=APP_DIR_HELP_STR, +) +@click.option( + "--output-path", + "-o", + default=None, + type=str, + help=( + "Local path where the output config will be written in YAML format. " + "If not provided, the config will be printed to STDOUT." + ), +) +@click.option( + "--grpc-servicer-functions", + default=[], + required=False, + multiple=True, + help="Servicer function for adding the method handler to the gRPC server. " + "Defaults to an empty list and no gRPC server is started.", +) +def build( + import_paths: Tuple[str], + app_dir: str, + output_path: Optional[str], + grpc_servicer_functions: List[str], +): + sys.path.insert(0, app_dir) + + def build_app_config(import_path: str, name: str = None): + app: Application = import_attr(import_path) + if not isinstance(app, Application): + raise TypeError( + f"Expected '{import_path}' to be an Application but got {type(app)}." 
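A small sketch of the auto-naming and route-prefix rules applied just below (the import paths are hypothetical):

    import_paths = ["mod1:app", "mod2:app"]
    names = [f"app{i + 1}" for i in range(len(import_paths))]
    prefixes = ["/" if len(import_paths) == 1 else f"/{n}" for n in names]
    print(list(zip(names, prefixes)))  # [('app1', '/app1'), ('app2', '/app2')]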
+ ) + + built_app: BuiltApplication = build_app(app, name=name) + schema = ServeApplicationSchema( + name=name, + route_prefix="/" if len(import_paths) == 1 else f"/{name}", + import_path=import_path, + runtime_env={}, + deployments=[deployment_to_schema(d) for d in built_app.deployments], + ) + + return schema.dict(exclude_unset=True) + + config_str = ( + "# This file was generated using the `serve build` command " + f"on Ray v{ray.__version__}.\n\n" + ) + + app_configs = [] + for app_index, import_path in enumerate(import_paths): + app_configs.append(build_app_config(import_path, name=f"app{app_index + 1}")) + + deploy_config = { + "proxy_location": "EveryNode", + "http_options": { + "host": "0.0.0.0", + "port": 8000, + }, + "grpc_options": { + "port": DEFAULT_GRPC_PORT, + "grpc_servicer_functions": grpc_servicer_functions, + }, + "logging_config": LoggingConfig().dict(), + "applications": app_configs, + } + + # Parse + validate the set of application configs + ServeDeploySchema.parse_obj(deploy_config) + + config_str += yaml.dump( + deploy_config, + Dumper=ServeDeploySchemaDumper, + default_flow_style=False, + sort_keys=False, + ) + cli_logger.info( + "The auto-generated application names default to `app1`, `app2`, ... etc. " + "Rename as necessary.\n", + ) + + # Ensure file ends with only one newline + config_str = config_str.rstrip("\n") + "\n" + + with open(output_path, "w") if output_path else sys.stdout as f: + f.write(config_str) + + +class ServeDeploySchemaDumper(yaml.SafeDumper): + """YAML dumper object with custom formatting for ServeDeploySchema. + + Reformat config to follow this spacing: + --------------------------------------- + + host: 0.0.0.0 + + port: 8000 + + applications: + + - name: app1 + + import_path: app1.path + + runtime_env: {} + + deployments: + + - name: deployment1 + ... + + - name: deployment2 + ... + """ + + def write_line_break(self, data=None): + # https://github.com/yaml/pyyaml/issues/127#issuecomment-525800484 + super().write_line_break(data) + + # Indents must be at most 4 to ensure that only the top 4 levels of + # the config file have line breaks between them. + if len(self.indents) <= 4: + super().write_line_break()
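Usage sketch for the dumper above, mirroring how `serve build` calls it (the config dict is a minimal made-up example):

    import yaml

    from ray.serve.scripts import ServeDeploySchemaDumper

    doc = {
        "proxy_location": "EveryNode",
        "http_options": {"host": "0.0.0.0", "port": 8000},
        "applications": [{"name": "app1", "import_path": "mod1:app"}],
    }
    # Blank lines appear between the shallow nesting levels, matching the
    # spacing shown in the class docstring.
    print(yaml.dump(doc, Dumper=ServeDeploySchemaDumper, default_flow_style=False, sort_keys=False))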